package com.wuwangfu.utils;

import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/**
 * @Description：
 * @Author：jcshen
 * @Date：2023-07-02
 *
 */
public class FlinkUtils {

    /** Shared execution environment used by every stream created through this utility. */
    public static final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    /**
     * Creates a Kafka-backed {@code DataStream} with exactly-once checkpointing enabled.
     *
     * <p>Required parameters: {@code checkpoint.path}, {@code bootstrap.servers},
     * {@code group.id}, {@code kafka.topics} (comma-separated topic list).
     * Optional parameters: {@code checkpoint.interval} (ms, default 30000),
     * {@code auto.offset.reset} (default {@code "earliest"}),
     * {@code enable.auto.commit} (default {@code "false"}),
     * {@code commit.offsets.on.checkpoints} (default {@code false}).
     *
     * @param params parsed job parameters
     * @param clazz  deserialization schema class; must expose a public no-arg constructor
     * @param <T>    element type produced by the deserialization schema
     * @return a DataStream of deserialized Kafka records
     * @throws Exception if a required parameter is missing or the schema cannot be instantiated
     */
    public static <T> DataStream<T> createKafkaStream(ParameterTool params, Class<? extends DeserializationSchema<T>> clazz) throws Exception {
        String chkPath = params.getRequired("checkpoint.path");
        // Checkpoint interval; defaults to 30 seconds when not supplied.
        long chkInterval = params.getLong("checkpoint.interval", 30000);

        // Enable exactly-once checkpointing and keep state in the filesystem backend.
        env.enableCheckpointing(chkInterval, CheckpointingMode.EXACTLY_ONCE);
        env.setStateBackend(new FsStateBackend(chkPath));

        // Kafka consumer configuration.
        Properties properties = new Properties();
        // Kafka broker host:port list.
        properties.setProperty("bootstrap.servers", params.getRequired("bootstrap.servers"));
        // Offset strategy: with no recorded offset start from the beginning,
        // otherwise resume from the last recorded offset.
        properties.setProperty("auto.offset.reset", params.get("auto.offset.reset", "earliest"));
        // Consumer group id.
        properties.setProperty("group.id", params.getRequired("group.id"));
        // With checkpointing enabled, Flink manages offsets itself; disable
        // Kafka's periodic auto-commit so it does not interfere.
        properties.setProperty("enable.auto.commit", params.get("enable.auto.commit", "false"));

        // Comma-separated list of topics to subscribe to.
        String topics = params.getRequired("kafka.topics");
        List<String> topicList = Arrays.asList(topics.split(","));

        // Build the consumer. clazz.newInstance() is deprecated since Java 9:
        // it propagates any checked exception thrown by the constructor unwrapped.
        // getDeclaredConstructor().newInstance() wraps such failures in
        // InvocationTargetException with the real cause attached; both paths are
        // covered by this method's `throws Exception` clause.
        FlinkKafkaConsumer<T> kafkaConsumer = new FlinkKafkaConsumer<>(
                topicList,                                    // topics to consume
                clazz.getDeclaredConstructor().newInstance(), // deserialization schema
                properties                                    // Kafka client config
        );
        // Whether to also write offsets back to Kafka's special offsets topic on
        // checkpoint completion; defaults to not doing so.
        kafkaConsumer.setCommitOffsetsOnCheckpoints(params.getBoolean("commit.offsets.on.checkpoints", false));

        return env.addSource(kafkaConsumer);
    }

}
