package cn.xuexiyuan.flinkstudy.connectors;

import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Properties;


/**
 * Demo: consume string records from one Kafka topic, keep only those containing
 * "success", and write the filtered stream to a second Kafka topic
 * (FlinkKafkaConsumer as source, FlinkKafkaProducer as sink).
 *
 * @author 左龙龙
 * @version 1.0
 * @since 2021-03-24
 **/
public class KafkaProducerDemo {

    /**
     * Builds and runs the streaming job: Kafka source -> filter("success") -> Kafka sink.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        // 0. Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 1. Source: prepare Kafka consumer connection properties.
        Properties properties = new Properties();
        // Kafka broker address(es).
        properties.setProperty("bootstrap.servers", "localhost:9092");
        // Consumer group id.
        properties.setProperty("group.id", "flink_study");
        // Resume from the committed offset if one exists; otherwise start
        // from the latest (most recent) record.
        properties.setProperty("auto.offset.reset", "latest");
        // Background thread re-checks Kafka partitions every 5 seconds
        // (dynamic partition discovery).
        properties.setProperty("flink.partition-discovery.interval-millis", "5000");
        // Auto-commit offsets back to Kafka. NOTE(review): this property only
        // takes effect when Flink checkpointing is disabled; with checkpointing
        // enabled, offsets are committed on checkpoint completion instead.
        properties.setProperty("enable.auto.commit", "true");
        // Auto-commit interval in milliseconds.
        properties.setProperty("auto.commit.interval.ms", "2000");

        FlinkKafkaConsumer<String> kafkaSource =
                new FlinkKafkaConsumer<>("flink_study_topic", new SimpleStringSchema(), properties);
        DataStream<String> kafkaDS = env.addSource(kafkaSource);

        // 2. Transformation: keep only records containing "success".
        // Lambda replaces the anonymous FilterFunction inner class (same behavior).
        SingleOutputStreamOperator<String> filterDS = kafkaDS.filter(s -> s.contains("success"));

        // 3. Sink: print the raw stream for debugging, and write the filtered
        // stream back out to a second Kafka topic.
        kafkaDS.print();

        // Producer only needs the broker address.
        Properties properties2 = new Properties();
        properties2.setProperty("bootstrap.servers", "localhost:9092");
        FlinkKafkaProducer<String> kafkaProducer =
                new FlinkKafkaProducer<>("flink_study_topic2", new SimpleStringSchema(), properties2);
        filterDS.addSink(kafkaProducer);

        filterDS.print();

        // 4. Execute. A job name makes this job identifiable in the Flink UI.
        env.execute("KafkaProducerDemo");
    }

}
