package cn.itcast.streaming.task;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.OutputFileConfig;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.bucketassigners.DateTimeBucketAssigner;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.io.IOException;
import java.util.Properties;

import static org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumerBase.KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS;

/**
 * Author itcast
 * Date 2021/9/20 16:29
 * Desc Common base class for streaming tasks: builds the stream execution
 * environment, reads data from the Kafka cluster, and writes results to HDFS.
 * Steps:
 * 1. Load the local key-value configuration file (conf.properties)
 * 2. Abstract the creation of the stream execution environment
 * 3. Abstract the creation of the Kafka-backed data stream
 */
public abstract class BaseTask {

    // Global job configuration, loaded once from conf.properties on the classpath
    // and shared by every subclass task.
    static ParameterTool parameterTool = null;

    static {
        try {
            parameterTool = ParameterTool.fromPropertiesFile(
                    // Load relative to this base class, not a specific task class.
                    BaseTask.class.getClassLoader()
                            .getResourceAsStream("conf.properties")
            );
        } catch (IOException e) {
            // Fail fast: every method below dereferences parameterTool, so a
            // missing/unreadable conf.properties must not be silently ignored
            // (the old printStackTrace() left parameterTool null and NPE'd later).
            throw new ExceptionInInitializerError(e);
        }
    }

    /**
     * Builds the common stream execution environment: parallelism, event time,
     * checkpointing, RocksDB state backend and a fixed-delay restart strategy.
     *
     * @param taskName used to namespace this task's checkpoint directory on HDFS
     * @return a fully configured {@link StreamExecutionEnvironment}
     */
    protected static StreamExecutionEnvironment getEnv(String taskName) {
        // Impersonate the root user for HDFS reads/writes.
        System.setProperty("HADOOP_USER_NAME", "root");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Make the configuration visible to every operator as global job parameters.
        env.getConfig().setGlobalJobParameters(parameterTool);
        // Parallelism precedence (lowest to highest): config file default,
        // client-side `flink run -p`, env.setParallelism, per-operator setting.
        env.setParallelism(1);
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // Checkpoint every 30 seconds. NOTE: the previous code also called
        // setCheckpointInterval(500L), which silently overrode this to 500 ms;
        // 500 ms belongs to the min-pause setting below.
        env.enableCheckpointing(30 * 1000L);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Keep externalized checkpoints around when the job is cancelled.
        checkpointConfig.enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
        );
        // Tolerate up to 10 checkpoint failures before failing the job.
        checkpointConfig.setTolerableCheckpointFailureNumber(10);
        // Checkpoints must provide exactly-once semantics.
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Never run more than one checkpoint at a time.
        checkpointConfig.setMaxConcurrentCheckpoints(1);
        // Abort any checkpoint that takes longer than 60 seconds.
        checkpointConfig.setCheckpointTimeout(60 * 1000L);
        // Leave at least 500 ms between the end of one checkpoint and the next.
        checkpointConfig.setMinPauseBetweenCheckpoints(500L);

        // RocksDB keeps state in an embedded store and asynchronously snapshots to HDFS.
        try {
            env.setStateBackend(new RocksDBStateBackend(
                    parameterTool.get("hdfsUri") + "/flink-checkpoints/" + taskName,
                    false
            ));
        } catch (IOException e) {
            // Silently skipping the state backend would silently disable fault
            // tolerance; surface the misconfiguration instead.
            throw new IllegalStateException("Failed to initialize RocksDB state backend", e);
        }

        // Restart up to 3 times, waiting 10 seconds between attempts.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(
                3,
                Time.seconds(10)
        ));
        return env;
    }

    /**
     * Builds a Kafka source stream for the topic named by the
     * {@code kafka.topic} configuration key.
     *
     * @param env     the stream execution environment to attach the source to
     * @param groupid Kafka consumer group id
     * @param clazz   deserialization schema class; must have a no-arg constructor
     * @param <T>     element type produced by the deserialization schema
     * @return the Kafka-backed data stream
     */
    protected static <T> DataStreamSource<T> getKafkaStream(StreamExecutionEnvironment env,
                                                            String groupid,
                                                            Class<? extends DeserializationSchema> clazz
    ) {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, parameterTool
                .getRequired("bootstrap.servers"));
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupid);
        // Periodically re-discover new topic partitions at the configured interval.
        props.setProperty(KEY_PARTITION_DISCOVERY_INTERVAL_MILLIS, parameterTool
                .getRequired("key.partition.discovery.interval.millis"));

        FlinkKafkaConsumer<T> consumer;
        try {
            consumer = new FlinkKafkaConsumer<T>(
                    parameterTool.getRequired("kafka.topic"),
                    // getDeclaredConstructor().newInstance() replaces the
                    // deprecated Class.newInstance().
                    clazz.getDeclaredConstructor().newInstance(),
                    props
            );
        } catch (ReflectiveOperationException e) {
            // The previous code swallowed this and then NPE'd on a null consumer;
            // report the real cause instead.
            throw new IllegalArgumentException(
                    "Cannot instantiate deserialization schema " + clazz.getName(), e);
        }

        // Let Flink commit Kafka offsets on successful checkpoints.
        consumer.setCommitOffsetsOnCheckpoints(true);
        // Resume from each partition's committed group offset.
        consumer.setStartFromGroupOffsets();
        return env.addSource(consumer);
    }

    /**
     * Builds a row-format StreamingFileSink that writes strings under
     * {@code <hdfsUri>/apps/hive/warehouse/ods.db/<path>}, bucketed by time.
     *
     * @param prefix               part-file name prefix
     * @param suffix               part-file name suffix
     * @param path                 table directory under the ODS database path
     * @param bucketAssignerFormat date-time pattern used to name time buckets
     * @return the configured HDFS sink
     */
    public static StreamingFileSink<String> getSink(
            String prefix,
            String suffix,
            String path,
            String bucketAssignerFormat

    ) {
        OutputFileConfig fileConfig = OutputFileConfig.builder()
                .withPartPrefix(prefix)
                .withPartSuffix(suffix)
                .build();
        return StreamingFileSink
                .forRowFormat(
                        new Path(parameterTool.getRequired("hdfsUri") + "/apps/hive/warehouse/ods.db/" + path),
                        new SimpleStringEncoder<String>("utf-8")
                )
                // One bucket (directory) per time window, named by the given pattern.
                .withBucketAssigner(
                        new DateTimeBucketAssigner<String>(bucketAssignerFormat)
                )
                // Roll part files every 10 s, after 3 s inactivity, or at 64 MiB.
                .withRollingPolicy(
                        DefaultRollingPolicy.builder()
                                .withRolloverInterval(10000L)
                                .withInactivityInterval(3000L)
                                .withMaxPartSize(64 * 1024 * 1024)
                                .build()
                ).withOutputFileConfig(fileConfig)
                .build();
    }

}
