package com.atguigu.flink.chapter05.sink;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * Word count over a socket text stream, written to Kafka with the legacy
 * FlinkKafkaProducer sink (hence the "Old" in the class name).
 *
 * @Author lzc
 * @Date 2023/6/20 09:45
 */
public class KafkaSinkDemoOld {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000); // pin the local web UI to port 2000
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);
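
        // Sketch, not part of the original demo: the AT_LEAST_ONCE sink below only
        // flushes pending records on checkpoints, so enabling checkpointing is what
        // makes that guarantee effective. The 5000 ms interval is an example value.
        // env.enableCheckpointing(5000);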
        
        // Classic word count: split on spaces, count per word, format as "word_count"
        SingleOutputStreamOperator<String> resultStream = env
            .socketTextStream("hadoop162", 8888)
            .flatMap(new FlatMapFunction<String, Tuple2<String, Long>>() {
                @Override
                public void flatMap(String value, Collector<Tuple2<String, Long>> out) throws Exception {
                    for (String word : value.split(" ")) {
                        out.collect(Tuple2.of(word, 1L));
                    }
                }
            })
            .keyBy(t -> t.f0)
            .sum(1)
            .map(t -> t.f0 + "_" + t.f1);
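        // For example, an input line "a a b" yields "a_1", "a_2", "b_1" (running counts).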
        
        
        // Simpler variant, kept for reference: SimpleStringSchema writes each String to
        // topic "s2" (needs import org.apache.flink.api.common.serialization.SimpleStringSchema).
        /*resultStream.addSink(
            new FlinkKafkaProducer<String>(
                "hadoop162:9092,hadoop163:9092",
                "s2",
                new SimpleStringSchema()
            )
        );*/
        
        Properties props = new Properties();
        // Kafka producer config; bootstrap.servers lists the brokers to connect to
        props.setProperty("bootstrap.servers", "hadoop162:9092,hadoop164:9092");
        resultStream.addSink(new FlinkKafkaProducer<String>(
            // default topic; each record below names its own target topic ("s2")
            "default",
            new KafkaSerializationSchema<String>() {
                @Override
                public ProducerRecord<byte[], byte[]> serialize(String element,
                                                                @Nullable Long timestamp) {
                    // Build one record per element, addressed to topic "s2".
                    // Never return null here: the Kafka producer cannot send a null
                    // record, and Flink does not pass null elements to this method.
                    return new ProducerRecord<>("s2", element.getBytes(StandardCharsets.UTF_8));
                }
            },
            props,
            // at-least-once: pending records are flushed on each checkpoint
            FlinkKafkaProducer.Semantic.AT_LEAST_ONCE
        ));
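
        // Sketch of the exactly-once variant, not part of the original demo:
        // EXACTLY_ONCE rides on Kafka transactions, so checkpointing must be enabled
        // and transaction.timeout.ms must be lowered below the broker's
        // transaction.max.timeout.ms (15 minutes by default), since Flink's own
        // default of 1 hour would be rejected. Downstream consumers should read with
        // isolation.level=read_committed. The timeout value is an example assumption.
        /*
        props.setProperty("transaction.timeout.ms", "600000"); // 10 minutes
        resultStream.addSink(new FlinkKafkaProducer<String>(
            "default",
            new KafkaSerializationSchema<String>() {
                @Override
                public ProducerRecord<byte[], byte[]> serialize(String element,
                                                                @Nullable Long timestamp) {
                    return new ProducerRecord<>("s2", element.getBytes(StandardCharsets.UTF_8));
                }
            },
            props,
            FlinkKafkaProducer.Semantic.EXACTLY_ONCE
        ));
        */
        // To inspect the output topic (assuming a standard Kafka install):
        //   kafka-console-consumer.sh --bootstrap-server hadoop162:9092 --topic s2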
        
        try {
            env.execute();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
