package cn.itcast.flink.connector;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Author itcast
 * Date 2022/1/13 15:58
 * Desc: Reads text lines from a socket, performs a streaming word count, and
 * writes the running counts to the Kafka topic {@code writetopic}.
 */
public class FlinkKafkaWriter {

    /** Kafka topic the word-count results are written to (single source of truth). */
    private static final String TOPIC = "writetopic";

    public static void main(String[] args) throws Exception {
        // Obtain the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 keeps output ordering simple for this demo job.
        env.setParallelism(1);
        // Read text lines from the socket source (e.g. start `nc -lk 9999` on node1).
        DataStreamSource<String> source = env.socketTextStream("node1", 9999);
        // Word count: split each line on spaces, emit (word, 1), key by word, sum counts.
        SingleOutputStreamOperator<Tuple2<String, Integer>> result = source
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                        for (String word : value.split(" ")) {
                            out.collect(Tuple2.of(word, 1));
                        }
                    }
                })
                .keyBy(t -> t.f0)
                .sum(1);
        // Kafka producer configuration: broker addresses (bootstrap.servers).
        Properties props = new Properties();
        props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");
        // Build the Kafka sink with at-least-once delivery semantics.
        FlinkKafkaProducer<String> producer = new FlinkKafkaProducer<>(
                TOPIC,
                new KafkaSerializationSchema<String>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(String element, @Nullable Long timestamp) {
                        // Encode explicitly as UTF-8: the no-arg getBytes() would depend
                        // on the platform default charset and is non-portable.
                        return new ProducerRecord<>(TOPIC, element.getBytes(StandardCharsets.UTF_8));
                    }
                },
                props,
                FlinkKafkaProducer.Semantic.AT_LEAST_ONCE
        );
        // Render each (word, count) pair as "word:count" and send it to Kafka.
        result
                .map(new MapFunction<Tuple2<String, Integer>, String>() {
                    @Override
                    public String map(Tuple2<String, Integer> value) throws Exception {
                        return value.f0 + ":" + value.f1;
                    }
                })
                .addSink(producer);
        // Launch the streaming job (blocks until the job terminates).
        env.execute();
    }
}
