package cn.gwm.flink.streaming.task;

import cn.gwm.flink.streaming.factory.DataHubSourceFactory;
import cn.gwm.flink.streaming.function.process.OriginalDataProcessFunction;
import cn.gwm.flink.streaming.ods.OdsHandle;
import cn.gwm.flink.streaming.ods.model.StandardModelConf;
import cn.gwm.flink.streaming.sink.kafka.FlinkKafkaUtil;
import cn.gwm.utils.ConfigLoader;
import cn.gwm.utils.DateTimeUtil;
import cn.hutool.db.Db;
import cn.hutool.db.ds.DSFactory;
import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.OutputTag;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * ods 层接收合并信号任务
 * @ClassName OdsTask
 * @Author xzh
 * @Date 2022/12/15 15:46
 **/
public class OdsTask {

    /**
     * Entry point for the ODS ingestion job.
     * <p>
     * Loads the active standard-model configurations for this task from MySQL,
     * builds one side-output tag per model, wires a DataHub source through
     * {@link OriginalDataProcessFunction} (which routes raw records to the
     * per-model side outputs), and hands the main stream plus the tag map to
     * {@link OdsHandle} before submitting the job.
     *
     * @param args command-line arguments, forwarded to {@link ConfigLoader#init}
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        ConfigLoader.init(args);
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        setEnv(env);

        // Load the active (deleted == "0") model configurations that belong to
        // this task ("OdsTaskP03") from the configured MySQL database.
        final List<StandardModelConf> confList = new ArrayList<>();
        try {
            Db.use(DSFactory.get(ConfigLoader.get("mysql.group.db.config")))
                    .find(StandardModelConf.getEntityWhere(), StandardModelConf.class).stream()
                    .filter(data -> "0".equals(data.getDeleted()) && "OdsTaskP03".equals(data.getTaskName()))
                    .forEach(confList::add);
        } catch (SQLException e) {
            // Fail fast with context: the job cannot run without its model configuration.
            throw new RuntimeException("failed to load StandardModelConf from MySQL", e);
        }

        // One side-output tag per model, keyed by model name. The anonymous
        // subclass ({}) preserves the generic type information for Flink.
        final Map<String, OutputTag<String>> canTagMap = new ConcurrentHashMap<>(16);
        for (StandardModelConf conf : confList) {
            canTagMap.put(conf.getModelName(), new OutputTag<String>(conf.getVehicleType()) {});
        }

        SingleOutputStreamOperator<String> process = env.addSource(DataHubSourceFactory.getDatahubSourceFunction(
                        ConfigLoader.get("dataHub.topic.all"),
                        ConfigLoader.get("dataHub.all.subId"),
                        DateTimeUtil.defaultFormatPare(ConfigLoader.get("all.receive.endTime"))))
                .process(new OriginalDataProcessFunction(canTagMap)).returns(Types.STRING).uid("ods_source_id");
        OdsHandle.handle(process, canTagMap, confList);
        env.execute("OdsTask");
    }

    /**
     * Configures checkpointing, state backend and restart strategy for the job.
     * <p>
     * Checkpoints run every 5 minutes with exactly-once semantics, are retained
     * on cancellation, and are stored on HDFS under the path read from the
     * {@code hdfsUri} configuration key.
     *
     * @param env the stream execution environment to configure
     */
    private static void setEnv(StreamExecutionEnvironment env) {
        // Checkpoint every 5 minutes. enableCheckpointing already sets the
        // interval, so the original redundant setCheckpointInterval call was dropped.
        env.enableCheckpointing(300 * 1000L);
        env.setRuntimeMode(RuntimeExecutionMode.STREAMING);
        // Exactly-once checkpointing semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Abort a checkpoint if it takes longer than 10 minutes.
        env.getCheckpointConfig().setCheckpointTimeout(600 * 1000L);
        // At most one checkpoint in flight, with at least 5s between two checkpoints.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000L);
        // Keep checkpoint data when the job is cancelled so it can be restored later.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // RocksDB state backend for large keyed state.
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        // Tolerate up to 3 checkpoint failures before failing the job.
        // NOTE: the deprecated setFailOnCheckpointingErrors(true) call was removed;
        // it forced the tolerable failure count to 0 and was immediately
        // overridden by this call, so the two settings contradicted each other.
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(3);
        // Unaligned checkpoints to keep checkpoint duration low under backpressure.
        env.getCheckpointConfig().enableUnalignedCheckpoints();
        env.getCheckpointConfig().setCheckpointStorage(ConfigLoader.get("hdfsUri") + "/flink/checkpoints/" + "OdsTask");
        // Restart at most 10 times within a 15-minute window, 10s between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.minutes(15), Time.seconds(10)));
    }


}
