package com.study.flink.java.utils;


import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;

import java.util.Arrays;
import java.util.Properties;

/**
 * Utility class for creating pre-configured Flink Kafka source streams.
 * @author linys
 * @since 1.0.1
 */
public class FlinkUtils {

    // Shared stream execution context; every stream created by this utility is attached to it.
    private static final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Default local state-backend location (dev only); override via the "state.backend.path"
    // parameter — in a cluster this should point at HDFS.
    private static final String DEFAULT_STATE_BACKEND_PATH =
            "file:///D:\\code\\IDEA\\flink-study\\dir\\day07\\backend\\523054fd67832f90144e2130db794463\\chk-11";

    /**
     * Creates a Kafka-backed {@link DataStream} reading from one or more topics
     * (comma-separated {@code topics} parameter), with the consumer group taken
     * from the {@code group.id} parameter.
     *
     * @param parameters job parameters; required keys: {@code bootstrap.servers},
     *                   {@code group.id}, {@code topics}
     * @param clazz      deserialization-schema class; must expose a public no-arg constructor
     * @param <T>        element type produced by the deserialization schema
     * @return a source stream of deserialized Kafka records
     * @throws IllegalAccessException if the schema class is not accessible
     * @throws InstantiationException if the schema class cannot be instantiated
     */
    public static <T> DataStream<T> createKafkaStream(ParameterTool parameters, Class<? extends DeserializationSchema<T>> clazz) throws IllegalAccessException, InstantiationException {
        configureEnvironment(parameters);
        // Parallelism defaults to 4 unless supplied.
        env.setParallelism(parameters.getInt("parallelism", 4));

        Properties props = buildConsumerProperties(parameters, parameters.getRequired("group.id"));
        String topics = parameters.getRequired("topics");

        // Parallel Kafka source — one sub-task per partition (up to the parallelism).
        // Class.newInstance() is deprecated, but replacing it would change the declared
        // checked exceptions that existing callers handle, so it is kept intentionally.
        FlinkKafkaConsumer<T> kafkaSource =
                new FlinkKafkaConsumer<T>(Arrays.asList(topics.split(",")), clazz.newInstance(), props);

        // After each successful checkpoint, commit offsets back to Kafka's internal offsets
        // topic. Two purposes: (1) external monitoring, (2) restore point when the job is
        // restarted without an explicit savepoint.
        kafkaSource.setCommitOffsetsOnCheckpoints(parameters.getBoolean("commit-offsets-on-checkpoints", true));

        return env.addSource(kafkaSource);
    }

    /**
     * Creates a {@code DataStream<String>} reading from a single, explicitly named topic
     * with an explicitly supplied consumer-group id.
     *
     * <p>Unlike the multi-topic overload, this variant leaves the environment parallelism
     * at its default (the original implementation had the override commented out).
     *
     * @param parameters job parameters; required key: {@code bootstrap.servers}
     * @param topic      the Kafka topic to consume
     * @param gid        the Kafka consumer-group id
     * @param clazz      deserialization-schema class; must expose a public no-arg constructor
     * @return a source stream of String records
     * @throws IllegalAccessException if the schema class is not accessible
     * @throws InstantiationException if the schema class cannot be instantiated
     */
    public static DataStream<String> createKafkaStream(ParameterTool parameters, String topic, String gid, Class<? extends DeserializationSchema<String>> clazz) throws IllegalAccessException, InstantiationException {
        configureEnvironment(parameters);

        Properties props = buildConsumerProperties(parameters, gid);

        // See the deprecation note in the multi-topic overload regarding newInstance().
        FlinkKafkaConsumer<String> kafkaSource =
                new FlinkKafkaConsumer<String>(Arrays.asList(topic), clazz.newInstance(), props);

        // Commit offsets to Kafka on checkpoint completion (monitoring + savepoint-less restore).
        kafkaSource.setCommitOffsetsOnCheckpoints(parameters.getBoolean("commit-offsets-on-checkpoints", true));

        return env.addSource(kafkaSource);
    }

    /**
     * Applies the checkpointing / restart / state-backend configuration shared by both
     * factory methods. Safe to call repeatedly — each call simply re-applies the settings.
     */
    private static void configureEnvironment(ParameterTool parameters) {
        // Make the parameters globally visible to all operators of the job.
        env.getConfig().setGlobalJobParameters(parameters);
        // Exactly-once checkpointing, every 5s by default.
        env.enableCheckpointing(parameters.getLong("checkpoint-interval", 5000L), CheckpointingMode.EXACTLY_ONCE);
        // Retain externalized checkpoints when the job is cancelled (manual cleanup required).
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Fixed-delay restart: 2 attempts, 2s apart.
        env.getConfig().setRestartStrategy(org.apache.flink.api.common.restartstrategy.RestartStrategies.fixedDelayRestart(2, 2000));
        // State backend location is now configurable; the default preserves the old behavior.
        env.setStateBackend(new FsStateBackend(parameters.get("state.backend.path", DEFAULT_STATE_BACKEND_PATH)));
    }

    /**
     * Builds the Kafka consumer {@link Properties} shared by both factory methods.
     *
     * @param parameters job parameters; required key: {@code bootstrap.servers}
     * @param groupId    the consumer-group id to use
     */
    private static Properties buildConsumerProperties(ParameterTool parameters, String groupId) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", parameters.getRequired("bootstrap.servers")); // Kafka broker addresses
        props.setProperty("group.id", groupId);
        // When no committed offset exists, start from the earliest record.
        props.setProperty("auto.offset.reset", parameters.get("auto.offset.reset", "earliest"));
        // Offsets are managed via Flink checkpoints, not Kafka's auto-commit.
        props.setProperty("enable.auto.commit", parameters.get("enable.auto.commit", "false"));
        return props;
    }

    /** Returns the shared {@link StreamExecutionEnvironment}. */
    public static StreamExecutionEnvironment getEnv() {
        return env;
    }

}
