package cn.itcast.streaming.task;

import cn.itcast.streaming.utils.ConfigLoader;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.Properties;

/**
 * Base class for streaming tasks:
 * 1) initialize the Flink runtime environment
 * 2) consume data from the Kafka source
 * 3) declare the data-processing hook (not implemented here)
 * 4) declare the data-persistence hook (not implemented here)
 */
public abstract class BaseTask {
    // Class-level SLF4J logger; all error paths below report through it instead of printStackTrace().
    private static final Logger logger = LoggerFactory.getLogger(BaseTask.class);

    // Shared execution environment for all tasks derived from this class.
    public static StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    // Set by getEnv(); reused as part of the Kafka consumer-group id.
    public static String appName;

    // Configuration loaded from conf.properties on the classpath.
    public static ParameterTool parameterTool;

    // Load the configuration file eagerly. Fail fast if it cannot be read:
    // every later call (parameterTool.getRequired(...)) would otherwise NPE
    // far from the real cause.
    static {
        try {
            parameterTool = ParameterTool.fromPropertiesFile(
                    BaseTask.class.getClassLoader().getResourceAsStream("conf.properties"));
        } catch (IOException e) {
            logger.error("Failed to load conf.properties from the classpath", e);
            throw new ExceptionInInitializerError(e);
        }
    }

    /**
     * Configures and returns the shared {@link StreamExecutionEnvironment}:
     * event-time semantics, exactly-once checkpointing every 30s with a RocksDB
     * state backend on HDFS, externalized checkpoints retained on cancellation,
     * and no restart strategy.
     *
     * @param className name of the concrete task; used as the checkpoint
     *                  subdirectory and remembered in {@link #appName}
     * @return the configured environment
     * @throws IllegalStateException if the RocksDB state backend cannot be created
     */
    public static StreamExecutionEnvironment getEnv(String className) {
        // HDFS access identity for the checkpoint directory.
        System.setProperty("HADOOP_USER_NAME", "root");
        env.getConfig().setGlobalJobParameters(parameterTool);
        // 2. Process data by event time.
        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        // 3. Enable checkpointing.
        // 3.1) Checkpoint every 30s — short enough to bound replay, long enough to avoid overhead.
        env.enableCheckpointing(1000 * 30L);
        // 3.2) Exactly-once semantics for state.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // 3.3) At least 20s between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(20 * 1000L);
        // 3.4) Abort a checkpoint that takes longer than 20s.
        env.getCheckpointConfig().setCheckpointTimeout(20 * 1000L);
        // 3.5) At most one checkpoint in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // 3.6) Keep externalized checkpoint data when the job is cancelled, so it can be restored.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 3.7) A failed checkpoint does not fail the job.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        // 3.8) RocksDB state backend under <hdfsUri>/flink/checkpoint/<className>,
        //      e.g. hdfsUri=hdfs://node01:8020
        String path = ConfigLoader.getProperty("hdfsUri");
        try {
            env.setStateBackend(new RocksDBStateBackend(path + "/flink/checkpoint/" + className));
        } catch (IOException e) {
            // Continuing here would silently run EXACTLY_ONCE on the default backend,
            // breaking recovery guarantees — fail fast instead of swallowing.
            logger.error("Failed to create RocksDB state backend at {}", path, e);
            throw new IllegalStateException("Cannot initialize RocksDB state backend", e);
        }
        // 4. Restart strategy: none — with checkpointing enabled the default would
        //    restart endlessly, so disable restarts explicitly.
        env.setRestartStrategy(RestartStrategies.noRestart());

        appName = className;
        return env;
    }

    /**
     * Creates a Kafka-backed {@link DataStream} using the topic and broker list
     * from the configuration and a deserialization schema instantiated from the
     * given class (which must have a public no-arg constructor).
     *
     * @param clazz deserialization schema class for the topic's records
     * @param <T>   element type produced by the schema
     * @return the Kafka source stream
     * @throws RuntimeException if the consumer cannot be created (e.g. missing
     *                          config keys or schema instantiation failure)
     */
    @SuppressWarnings("unchecked")
    public static <T> DataStream<T> createKafkaStream(Class<? extends DeserializationSchema> clazz) {
        try {
            // 5. Build the Kafka consumer properties.
            Properties prop = new Properties();
            // 5.1) Broker list.
            prop.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, parameterTool.getRequired("bootstrap.servers"));
            // 5.2) Consumer group id, derived from the task name.
            prop.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group" + appName);
            // 5.3) Partition discovery is not needed with the 0.11 connector's sticky assignment.
            // 5.4) Offset reset when no committed offset exists.
            // NOTE(review): the config key "enable.auto.reset" looks like a typo for
            // "auto.offset.reset" — its value is applied to AUTO_OFFSET_RESET_CONFIG.
            // Kept as-is to preserve behavior; verify against conf.properties.
            prop.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, parameterTool.get("enable.auto.reset", "earliest"));
            // 5.5) Create the consumer; getDeclaredConstructor() replaces the
            //      deprecated Class.newInstance() (same behavior, wrapped exceptions).
            FlinkKafkaConsumer011<T> consumer = new FlinkKafkaConsumer011<>(
                    parameterTool.getRequired("kafka.topic"),
                    clazz.getDeclaredConstructor().newInstance(),
                    prop
            );
            // 5.6) Commit offsets back to Kafka as part of each completed checkpoint.
            consumer.setCommitOffsetsOnCheckpoints(true);
            // 6. Attach the consumer to the environment as a source.
            return env.addSource(consumer);
        } catch (Exception e) {
            // Returning null here (old behavior) only deferred the failure to an
            // NPE at the caller — surface the real cause instead.
            logger.error("Failed to create Kafka consumer stream", e);
            throw new RuntimeException("Failed to create Kafka consumer stream", e);
        }
    }

    // Data-processing hook; concrete tasks override as needed.
    public void process() {}

    // Data-persistence hook; reserved for subclass-internal use.
    private void save() {}

}
