package cn.gwm.flink.streaming.task;

import cn.gwm.flink.streaming.constant.ChargePredictFields;
import cn.gwm.flink.streaming.sink.kafka.FlinkKafkaUtil;
import cn.gwm.flink.streaming.strategy.vehiclepredict.ChargeBucketAssigner;
import cn.gwm.flink.streaming.strategy.vehiclepredict.Data2HiveFormat;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.lang3.time.DateFormatUtils;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringEncoder;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.checkpoint.CheckpointFailureManager;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.filesystem.StreamingFileSink;
import org.apache.flink.streaming.api.functions.sink.filesystem.rollingpolicies.DefaultRollingPolicy;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.List;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * @author : GW00280745
 * @date : 2023/1/29 15:48
 * @description : Flink streaming job that consumes charge-prediction records from a
 *                Kafka DWS topic, extracts the body.source JSON payload, and sinks
 *                the rows as text files to HDFS (partitioned for a Hive external table).
 */
public class ChargePredictDataOutputDwsTask {

    /**
     * Kafka source topic.
     * Previous value: "dws_multidimensional_predict".
     */
    private static final String srcTopic = "dws_multidimensional_predict_obc_validation";

    /**
     * Kafka bootstrap servers (test cluster).
     * Ali-cloud alternative: "10.31.8.112:9092,10.31.8.113:9092,10.31.8.114:9092".
     */
    private final String kafkaServer = "bd-zcpt-bd-zcpt-test-000001:9092,bd-zcpt-bd-zcpt-test-000002:9092,bd-zcpt-bd-zcpt-test-000003:9092,bd-zcpt-bd-zcpt-test-000004:9092,bd-zcpt-bd-zcpt-test-000005:9092";

    /**
     * Checkpoint and output locations on HDFS, addressed through HA nameservice
     * "nameservice1". Single-NameNode alternative used previously:
     * hdfs://bd-zcpt-bd-zcpt-test-000001:8020/external/data/dws/...
     */
    private final String checkPointDir = "hdfs://nameservice1/external/data/dws/dws_multidimensional_charge_cp";
    private final String hdfsPath = "hdfs://nameservice1/external/data/dws/dws_multidimensional_charge_data";

    /**
     * Debug toggle (replaces the former {@code if (true)} dead branch):
     * true = sink to HDFS for Hive; false = print records to stderr.
     */
    private static final boolean SINK_TO_HDFS = true;

    public static void main(String[] args) throws Exception {
        ChargePredictDataOutputDwsTask dwsTask = new ChargePredictDataOutputDwsTask();
        StreamExecutionEnvironment env = dwsTask.loadConfInfo(args);
        DataStream<JSONObject> srcDataStream = dwsTask.sourceStd(env);
        dwsTask.outPutData(srcDataStream);
        env.execute(srcTopic+"_sink2hive");
    }

    /**
     * Result output: routes the stream either to the HDFS/Hive sink (production)
     * or to stderr (debugging), controlled by {@link #SINK_TO_HDFS}.
     *
     * @param resultData the standardized source stream produced by {@link #sourceStd}
     * @author GW00280745
     **/
    private void outPutData(DataStream<JSONObject> resultData) {
        if (SINK_TO_HDFS) {
            dataOutput(resultData);
        } else {
            resultData.printToErr();
        }
    }

    /**
     * Formats each record for the target Hive table and writes it with a
     * row-format {@link StreamingFileSink}.
     *
     * NOTE(review): the parameter is intentionally left raw — Data2HiveFormat's
     * generic signature is not visible from this file, and the explicit
     * {@code Types.GENERIC(String.class)} hint below suggests automatic type
     * extraction fails for it; parameterizing the stream here could break the
     * {@code map()} call. Confirm against Data2HiveFormat before tightening.
     *
     * @param resultData stream of records to persist
     */
    @SuppressWarnings({"unchecked", "rawtypes"})
    private void dataOutput(DataStream resultData) {
        // Sorted field names define the stable column order expected by the Hive table.
        List<Object> keyListResult = ChargePredictFields.ins().resInfo.keySet().stream().sorted().collect(Collectors.toList());
        System.out.println(keyListResult);
        StreamingFileSink<String> hiveSink = StreamingFileSink
                .forRowFormat(new Path(hdfsPath), new SimpleStringEncoder<String>(ChargePredictFields.ins().utf8))
                .withBucketAssigner(new ChargeBucketAssigner<>())
                .withRollingPolicy(DefaultRollingPolicy.builder()
                        // Roll to a new part file at most every 3 minutes.
                        .withRolloverInterval(TimeUnit.MINUTES.toMillis(3))
                        // Also roll after 1 minute without any new records.
                        .withInactivityInterval(TimeUnit.MINUTES.toMillis(1))
                        // Also roll once a part file reaches 128 MiB.
                        .withMaxPartSize(128 * 1024 * 1024L)
                        .build()
                )
                .build();
        resultData
                // Convert each record to the target Hive table's row format
                // (the table must exist and match keyListResult's column order).
                .map(new Data2HiveFormat(keyListResult))
                .returns(Types.GENERIC(String.class))
                .addSink(hiveSink)
                .name("textFile2hive");
    }

    /**
     * Source standardization: currently only extracts body.source from each raw
     * Kafka message; no field/value transformation is applied yet.
     *
     * @param env the stream execution environment
     * @return stream of the "source" JSON objects
     * @author GW00280745
     **/
    private DataStream<JSONObject> sourceStd(StreamExecutionEnvironment env) {
        return env
                .addSource(consumer(srcTopic, kafkaServer))
                .map(new MapFunction<String, JSONObject>() {
                    @Override
                    public JSONObject map(String value) throws Exception {
                        JSONObject jsonObject = JSONObject.parseObject(value);
                        // Payload layout: {"body": {"source": {...}}}
                        return jsonObject.getJSONObject("body").getJSONObject("source");
                    }
                })
                // fastjson's JSONObject is not POJO-analyzable; force generic serialization.
                .returns(Types.GENERIC(JSONObject.class));
    }

    /**
     * Builds the Kafka consumer. The group id embeds the submit time (HHmmss),
     * so every submission is a fresh consumer group; combined with
     * {@code setStartFromEarliest()} this replays the topic from the beginning
     * on each run — presumably intentional for backfill/validation, confirm
     * before production use.
     *
     * @param dwdTopic source topic name
     * @param server   Kafka bootstrap servers
     * @return consumer reading the full topic from the earliest offset
     */
    private FlinkKafkaConsumer<String> consumer(String dwdTopic, String server) {
        String hHmmss = DateFormatUtils.format(System.currentTimeMillis(), "HHmmss");
        String defaultGroupId = dwdTopic.concat("#").concat(hHmmss);
        System.out.println(" 来源dwmTopic ===== "+dwdTopic);
        System.out.println(" groupId ===== [ kafka-consumer-groups --bootstrap-server 10.31.8.112:9092 --describe --group "+defaultGroupId);
        FlinkKafkaConsumer<String> consumer = FlinkKafkaUtil.getConsumer(server, defaultGroupId, dwdTopic);
        consumer.setStartFromEarliest();
        return consumer;
    }

    /**
     * Loads the Flink runtime configuration: parallelism, RocksDB state backend,
     * and checkpoint/restart policies.
     *
     * @param args command-line args; supports --parallelism (default 3)
     * @return the configured execution environment
     * @throws Exception if environment setup fails
     **/
    private StreamExecutionEnvironment loadConfInfo(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism is overridable via --parallelism; defaults to the previous fixed value of 3.
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        env.setParallelism(parameterTool.getInt("parallelism", 3));
        // Incremental RocksDB checkpoints: faster per-checkpoint, full snapshots
        // risk failure once state grows large.
        env.setStateBackend(new EmbeddedRocksDBStateBackend(true));
        CheckpointConfig config = env.getCheckpointConfig();
        // Exactly-once checkpointing (alternative: AT_LEAST_ONCE).
        config.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Trigger a checkpoint every 120 s (measured start-to-start).
        config.setCheckpointInterval(120*1000L);
        // A checkpoint must complete within 60 s or it is discarded.
        config.setCheckpointTimeout(60*1000L);
        // At most one checkpoint in flight at a time.
        config.setMaxConcurrentCheckpoints(1);
        // Retain externalized checkpoint data on cancellation/failure so the job
        // can be restored manually from a chosen checkpoint.
        config.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Checkpoint storage location (file system).
        config.setCheckpointStorage(checkPointDir);
        // At least 100 s between the end of one checkpoint and the start of the next.
        config.setMinPauseBetweenCheckpoints(100*1000);
        // On failure: restart up to 3 times, 10 s apart, before failing the job.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(10)));
        // Never fail the job on checkpoint failures alone.
        config.setTolerableCheckpointFailureNumber(CheckpointFailureManager.UNLIMITED_TOLERABLE_FAILURE_NUMBER);

        return env;
    }
}
