package com.dtwave.lnstreaming;

import com.alibaba.fastjson.JSONObject;
import com.dtwave.constant.Constants;
import com.dtwave.lnstreaming.filter.StreamDataSplit;
import com.dtwave.lnstreaming.process.StreamDataExecutor;
import com.dtwave.lnstreaming.source.KafkaSource;
import com.dtwave.param.CheckPointParamObj;
import com.dtwave.param.EsparamObj;
import com.dtwave.param.KafkaParamsObj;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.concurrent.TimeUnit;

public class LoanDetail {

    private final static Logger LOGGER = LoggerFactory.getLogger(LoanDetail.class);

    // Elasticsearch sink configuration, populated in main() from CLI args.
    // NOTE(review): mutable public static state — presumably read by other classes
    // in this project; confirm before narrowing visibility.
    public static EsparamObj esParamObj;

    /**
     * Job entry point: parses command-line parameters, configures the Flink
     * stream/table environment (parallelism, checkpointing, idle-state retention),
     * wires the Kafka source through the split/filter operator into the ES sink,
     * and submits the job.
     *
     * @param args CLI arguments parsed by {@link ParameterTool}; every parameter
     *             has a default, so the job can start with no arguments
     * @throws Exception if job submission/execution fails (rethrown after logging)
     */
    public static void main(String[] args) throws Exception {

        LOGGER.info("实时任务启动读取参数信息");
        ParameterTool params = ParameterTool.fromArgs(args);

        // Kafka parameters: the CLI value wins, otherwise fall back to the default.
        KafkaParamsObj kafkaParams = KafkaParamsObj.builder()
                .dataFrom(params.get("dataFrom", Constants.DATA_FROM))
                .groupId(params.get("groupId", Constants.groupId))
                .oggTopic(params.get("topic", ""))
                .startTimestamp(params.get("startTimestamp", ""))
                .startUpModule(params.get("UpModule", ""))
                .kafkaUrl(params.get("url", ""))
                .build();

        LOGGER.info(kafkaParams.toString());

        // Checkpoint parameters (all durations in milliseconds unless noted).
        CheckPointParamObj cpParamObj = CheckPointParamObj.builder()
                .useCheckpoints(params.getBoolean("useCheckpoints", false))
                .checkpointsOneTimes(params.getInt("checkpointsOneTimes", 600000))
                .minPauseBetweenCheckpoints(params.getInt("minPauseBetweenCheckpoints", 300000))
                .checkpointTimeout(params.getInt("checkpointTimeout", 300000))
                .maxConcurrentCheckpoints(params.getInt("maxConcurrentCheckpoints", 1))
                .checkpointDataUri(params.get("checkpointDataUri", ""))
                .duration(params.getInt("duration", 300))
                .build();

        LOGGER.info(cpParamObj.toString());

        // Elasticsearch sink parameters.
        esParamObj = EsparamObj.builder()
                .esUrl(params.get("esUrl", ""))
                .esIndex(params.get("esIndex", ""))
                .esIndexShards(params.getInt("esIndexShards", 3))
                .esIndexReplicas(params.getInt("esIndexReplicas", 1))
                .esIntervalMs(params.get("esIntervalMs", "100"))
                .esMaxAction(params.get("esMaxAction", "100"))
                .esMaxReties(params.get("esMaxReties", "5"))
                .isQuery(params.getBoolean("isQuery", true))
                .build();
        LOGGER.info(esParamObj.toString());

        LOGGER.info("初始化实时流环境");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Single parallelism for the whole job.
        env.setParallelism(1);

        // Disable watermark generation: this job does not handle late data.
        env.getConfig().setAutoWatermarkInterval(0);

        // Expose the CLI parameters globally so operators can read them at runtime.
        env.getConfig().setGlobalJobParameters(params);

        // Checkpointing is opt-in via the useCheckpoints flag.
        if (cpParamObj.isUseCheckpoints()) {
            LOGGER.info("初始化checkpoint");
            configureCheckpointing(cpParamObj, env);
        }

        LOGGER.info("初始化表环境");
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        LOGGER.info("实时流表配置");
        TableConfig config = tableEnv.getConfig();

        // Evict idle operator state after `duration` minutes to bound state size.
        LOGGER.info("状态等待时间配置，{}min", cpParamObj.getDuration());
        config.setIdleStateRetention(Duration.ofMinutes(cpParamObj.getDuration()));

        LOGGER.info("准备kafka数据源");
        // Typed consumer: downstream operators consume String records.
        FlinkKafkaConsumer<String> flinkKafkaConsumer = KafkaSource.kafkaSource(kafkaParams);
        DataStream<String> kafkaDataStream;

        // "reblance" (sic — the parameter key is kept as-is for backward
        // compatibility): redistribute records round-robin so uneven Kafka
        // partitions do not cause data skew.
        if (params.getBoolean("reblance", true)) {
            LOGGER.info("数据reblance");
            kafkaDataStream = env.addSource(flinkKafkaConsumer).rebalance();
        } else {
            kafkaDataStream = env.addSource(flinkKafkaConsumer);
        }

        LOGGER.info("数据过滤");
        SingleOutputStreamOperator<JSONObject> flatMapStream = StreamDataSplit.fitler(kafkaDataStream);

        LOGGER.info("数据写入");
        StreamDataExecutor.executeFOResult(flatMapStream, esParamObj);

        try {
            env.execute(kafkaParams.getGroupId());
        } catch (Exception e) {
            // Log WITH the cause and rethrow. Previously the exception was
            // swallowed, so the process exited successfully even when the job
            // failed and the stack trace was lost.
            LOGGER.error("flink任务执行失败", e);
            throw e;
        }
    }

    /**
     * Applies checkpointing, state-backend and restart-strategy settings to the
     * execution environment.
     *
     * <p>Exactly-once overview: the JobManager's checkpoint coordinator triggers a
     * checkpoint at the sources; barriers flow downstream and each operator
     * snapshots its state (including Kafka offsets) into the state backend when a
     * barrier arrives. A transactional sink pre-commits data as it writes; once
     * every task has acknowledged the snapshot, the JobManager confirms the
     * checkpoint and the sink finalizes the transaction (two-phase commit). On a
     * failed checkpoint the pre-commit is discarded and consumption restarts from
     * the last committed offsets.
     *
     * @param cpParamObj checkpoint parameters parsed from the command line
     * @param env        the environment to configure
     */
    private static void configureCheckpointing(CheckPointParamObj cpParamObj, StreamExecutionEnvironment env) {
        // Flush network buffers immediately to minimize latency.
        env.setBufferTimeout(0);
        // A failed checkpoint must not fail the whole job.
        env.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        // BUG FIX: the checkpoint *interval* previously reused getCheckpointTimeout(),
        // leaving the dedicated checkpointsOneTimes parameter (default 600000 ms)
        // unused and coupling the trigger period to the timeout.
        env.enableCheckpointing(cpParamObj.getCheckpointsOneTimes());
        // Exactly-once checkpointing mode (see class-level two-phase-commit notes).
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Minimum gap between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(cpParamObj.getMinPauseBetweenCheckpoints());
        // A checkpoint that exceeds this timeout is discarded.
        env.getCheckpointConfig().setCheckpointTimeout(cpParamObj.getCheckpointTimeout());
        // Cap on concurrently in-flight checkpoints.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(cpParamObj.getMaxConcurrentCheckpoints());
        // Retain checkpoint data when the job is cancelled, for manual recovery.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Persist state in RocksDB at the configured URI (e.g. HDFS).
        try {
            env.setStateBackend(new RocksDBStateBackend(cpParamObj.getCheckpointDataUri()));
        } catch (Exception e) {
            // Throwable must be the LAST argument (not a {} placeholder) so SLF4J
            // prints the full stack trace instead of just e.toString().
            LOGGER.error("check 存往hdfs失败，url[{}]", cpParamObj.getCheckpointDataUri(), e);
        }
        // Fixed-delay restart: up to 3 attempts, 10 seconds apart.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10, TimeUnit.SECONDS)));
    }

}
