package com.atguigu.flink.datastreamapi.sink;

import com.alibaba.fastjson.JSON;
import com.atguigu.flink.function.WaterSensorMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

/**
 * Created by Smexy on 2022/12/14
 *
 *  FlinkKafkaProducer : 生产数据到kafka.
 *          ProducerRecord不需要key
 *
 *          测试集群允许自动创建topic!
 *                  要写入的topic不存在，会自动创建。
 *                      自动创建的分区数和副本取决于broker的设置。
 */
public class Demo2_KafkaSink
{
    /**
     * Driver: reads lines from a socket, parses them into WaterSensor beans,
     * serializes each bean to a JSON string, and writes the strings to the
     * Kafka topic "topicB".
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to submit or execute
     *         (propagated rather than swallowed so a broken job exits visibly)
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        /*
            FlinkKafkaProducer(
                String brokerList:   Kafka broker address(es)
                String topicId:      topic to write to (auto-created if the
                                     brokers allow auto topic creation; partition
                                     and replica counts come from broker config)
                SerializationSchema<IN> serializationSchema: serializer
                        IN: type of the value placed in each ProducerRecord;
                            usually String (JSON). No record key is set.
            )
         */
        FlinkKafkaProducer<String> flinkKafkaProducer =
            new FlinkKafkaProducer<>("hadoop102:9092", "topicB", new SimpleStringSchema());

        // socket text -> WaterSensor bean -> JSON string
        SingleOutputStreamOperator<String> ds = env
            .socketTextStream("hadoop103", 8888)
            .map(new WaterSensorMapFunction())
            .map(ws -> JSON.toJSONString(ws));

        // Write out to Kafka.
        ds.addSink(flinkKafkaProducer);

        // Propagate any execution failure instead of catching it and calling
        // printStackTrace(): swallowing the exception would let main() return
        // normally even though the job never ran.
        env.execute();
    }
}
