package com.atguigu.flink.chapter05.sink;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Flink streaming word-count that writes its running results to Kafka.
 *
 * <p>Pipeline: read text lines from a socket ({@code hadoop102:8888}), split each
 * line into whitespace-separated words, maintain a running count per word, format
 * each update as {@code word_count}, and sink it to the Kafka topic {@code s2}
 * with an at-least-once delivery guarantee.
 */
public class KafkaSinkDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Pin the REST / Web UI port so local runs are predictable.
        conf.setInteger("rest.port", 2000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        SingleOutputStreamOperator<String> resultStream = env.socketTextStream("hadoop102", 8888)
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String value,
                                        Collector<Tuple2<String, Integer>> out) throws Exception {
                        // Emit (word, 1) for every space-separated token in the line.
                        for (String word : value.split(" ")) {
                            out.collect(Tuple2.of(word, 1));
                        }
                    }
                })
                .keyBy(t -> t.f0)
                .sum(1) // running count per word
                .map(t -> t.f0 + "_" + t.f1);

        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                // Kafka broker addresses
                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
                .setRecordSerializer( // record serialization: topic + value schema
                        KafkaRecordSerializationSchema.<String>builder()
                                .setTopic("s2")
                                .setValueSerializationSchema(new SimpleStringSchema()) // value serializer
                                .build()
                )
                // At-least-once: records may be duplicated after a failure, but never lost.
                .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
                .build();
        resultStream.sinkTo(kafkaSink);

        // Let failures propagate: previously execute() was wrapped in
        // catch (Exception) { e.printStackTrace(); }, which swallowed job
        // startup/runtime failures and made the JVM exit with status 0.
        env.execute();
    }
}


//public class KafkaSinkDemo {
//    public static void main(String[] args) {
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
//        env.setParallelism(1);
//
//        SingleOutputStreamOperator<String> sink = env.socketTextStream("hadoop102", 8888)
//                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
//                    @Override
//                    public void flatMap(String s,
//                                        Collector<Tuple2<String, Integer>> out) throws Exception {
//                        String[] words = s.split(" ");
//                        for (String word : words) {
//                            out.collect(Tuple2.of(word, 1));
//                        }
//                    }
//                })
//                .keyBy(t -> t.f0)
//                .sum(1)
//                .map(t -> t.f0 + " _ " + t.f1);
//
//        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
//                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
//                .setRecordSerializer( //设置序列化
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("s2")
//                                .setValueSerializationSchema(new SimpleStringSchema()) //设置序列化器
//                                .build()
//                )
//                .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)  //一致性保证
//                .build();
//
//        sink.sinkTo(kafkaSink);
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//    }
//}

//public class KafkaSinkDemo {
//    public static void main(String[] args) {
//        Configuration conf = new Configuration();
//        conf.setInteger("rest.port",2000);
//        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
//        env.setParallelism(1);
//
//        SingleOutputStreamOperator<String> resultSink = env.socketTextStream("hadoop102", 8888)
//                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
//                    @Override
//                    public void flatMap(String s,
//                                        Collector<Tuple2<String, Integer>> out) throws Exception {
//                        String[] words = s.split(" ");
//                        for (String word : words) {
//                            out.collect(Tuple2.of(word, 1));
//                        }
//
//                    }
//                })
//                .keyBy(t -> t.f0)
//                .sum(1)
//                .map(t -> t.f0 + "_" + t.f1);
//
//        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
//                // Kafka broker addresses
//                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
//                .setRecordSerializer( //设置序列化
//                        KafkaRecordSerializationSchema.<String>builder()
//                                .setTopic("s2")
//                                .setValueSerializationSchema(new SimpleStringSchema())  //设置序列化器
//                                .build())
//                .setDeliveryGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)   //一致性保证
//                .build();
//
//        resultSink.sinkTo(kafkaSink);
//
//        try {
//            env.execute();
//        } catch (Exception e) {
//            e.printStackTrace();
//        }
//    }
//}