package com.desheng.bigdata.flink.stream.sink

import java.util.Properties

import com.desheng.bigdata.flink.deserialization.WorkerDeSerializationSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010

/**
  * Custom Kafka sink example: streams a small, bounded collection of
  * [[Worker]] records into a Kafka topic using FlinkKafkaProducer010.
  */
object _01Sink2KafkaOps {

    def main(args: Array[String]): Unit = {
        val environment = StreamExecutionEnvironment.getExecutionEnvironment

        // Bounded demo input: a handful of Worker records.
        val source: DataStream[Worker] = environment.fromCollection(List(
            Worker(1, "吴延俊", 25, "研发部"),
            Worker(2, "宋建华", 26, "产品部"),
            Worker(3, "崔蒙恩", 58, "小卖部"),
            Worker(4, "小岚岚", 48, "小卖部"),
            Worker(5, "成思远", 68, "后勤部"),
            Worker(6, "李向闪", 26, "产品部")
        ))

        // Write the records into the "worker" Kafka topic.
        source.addSink(createKafkaProducer("worker", producerConfig()))
        environment.execute()
    }

    // Producer settings: broker list, leader-only acks, 1 ms batching linger.
    private def producerConfig(): Properties = {
        val config = new Properties()
        config.put("bootstrap.servers", "bigdata01:9092,bigdata02:9092,bigdata03:9092")
        config.put("acks", "1")
        config.put("linger.ms", "1")
        config
    }

    /**
      * Builds a Kafka producer sink for [[Worker]] records.
      *
      * @param topic      destination Kafka topic
      * @param properties Kafka producer configuration
      */
    def createKafkaProducer(topic: String, properties: Properties): FlinkKafkaProducer010[Worker] = {
        // NOTE(review): despite its "DeSerialization" name, this schema must also
        // provide the serialization side required by a producer — confirm against
        // WorkerDeSerializationSchema's definition.
        new FlinkKafkaProducer010[Worker](topic, new WorkerDeSerializationSchema, properties)
    }
}
/**
  * Immutable record for one worker.
  *
  * `toString` is overridden to render a comma-separated line
  * ("id,name,age,dept") suitable for writing to Kafka as plain text.
  */
case class Worker(id: Int, name: String, age: Int, dept: String) {
    override def toString: String = s"$id,$name,$age,$dept"
}
