package com.sjk.flink;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.sjk.flink.analysis.IAnalysisHander;
import com.sjk.flink.exeception.BaseException;
import com.sjk.flink.exeception.ErrorCode;
import com.sjk.flink.functions.DorisProcessFunction;
import com.sjk.flink.kafka.MyKafkaDeserializationSchema;
import com.sjk.flink.util.ObjectMapperUtil;
import org.apache.doris.flink.cfg.DorisExecutionOptions;
import org.apache.doris.flink.cfg.DorisOptions;
import org.apache.doris.flink.cfg.DorisSink;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.core.fs.FSDataInputStream;
import org.apache.flink.core.fs.FileSystem;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.fs.hdfs.HadoopFsFactory;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.ByteArrayOutputStream;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.*;


/**
 *
 * @author SAMA
 */
public class MultipleStreamProcess {

    // Logger for this class. Was mistakenly bound to ExpireStreamProcess.class,
    // which made log lines from this job appear under the wrong logger name.
    private static final Logger log = LoggerFactory.getLogger(MultipleStreamProcess.class);

    /**
     * Entry point. Reads a cluster config JSON from HDFS plus a bundled
     * {@code /MultipleStream.json} group definition, then for every group builds a
     * Kafka source -&gt; process-function -&gt; Doris sink pipeline and submits the job.
     *
     * @param args command-line arguments; {@code --test true|false} controls Kafka
     *             offset auto-commit (defaults to {@code true})
     * @throws Exception on any config-read, parse, or job-submission failure
     */
    public static void main(String[] args) throws Exception {
        // 1. Read command-line parameters.
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        boolean istest = parameterTool.getBoolean("test", true);

        // 2. Load the external JSON config from the Huawei MRS HDFS cluster.
        String configText = readHdfsConfig("hdfs://hacluster/", "/tmp/flink-lk/config.json");
        log.warn("==========================定义文件=============================");
        log.warn(configText);
        log.warn("=======================================================");
        ObjectMapper objectMapper = ObjectMapperUtil.getObjectMapper();
        JsonNode configJson = objectMapper.readTree(configText);

        // Parse the bundled stream-group definition from the classpath.
        // (Was loaded via ExpireStreamProcess.class — a copy-paste slip; same
        // classpath, but this class is the correct anchor.)
        JsonNode multipleConfig = objectMapper
                .readTree(MultipleStreamProcess.class.getResourceAsStream("/MultipleStream.json"));
        if (multipleConfig == null) {
            throw new BaseException(ErrorCode.CODE_E00009);
        }
        JsonNode parallelism = multipleConfig.get("parallelsNum");
        JsonNode groups = multipleConfig.get("groups");

        // 3. Build the streaming environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Optional compression for all checkpoints and savepoints (default: off).
        env.getConfig().setUseSnapshotCompression(true);
        // Parallelism comes from the bundled config; parseInt keeps the original
        // fail-fast behavior on a malformed value (no deprecated new Integer boxing).
        env.setParallelism(Integer.parseInt(parallelism.asText()));
        // Checkpointing: trigger every 5s, exactly-once.
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // Minimum pause between consecutive checkpoints.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000);
        // A checkpoint must finish within 30s or it is discarded.
        env.getCheckpointConfig().setCheckpointTimeout(30000);
        // Only one checkpoint in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // Retain checkpoint data when the job is cancelled so it can be restored.
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Tolerate one failed checkpoint before failing the job (default is 0).
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(1);
        // Fixed-delay restart: up to 3 attempts, 5s apart, then the job exits.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.seconds(5)));

        // 4. One Kafka -> Doris pipeline per configured group.
        for (JsonNode group : groups) {
            String groupId = group.get("id").asText();
            String sourceTopic = group.get("sourceTopic").asText();
            String doirsTable = group.get("doirsTable").asText();
            String type = group.get("type").asText();

            // Map of handler class name -> reflectively created handler instance.
            Map<String, IAnalysisHander> analysisMap = new HashMap<>();
            JsonNode fields = group.get("fields");
            if (fields != null && fields.isArray()) {
                for (JsonNode field : fields) {
                    JsonNode analysisHanderNode = field.get("analysisHander");
                    if (analysisHanderNode != null) {
                        String className = analysisHanderNode.asText();
                        IAnalysisHander analysisHander = getInstance(className);
                        if (analysisHander != null) {
                            analysisMap.put(className, analysisHander);
                        }
                    }
                }
            }

            Properties properties = buildKafkaProperties(
                    configJson.get("bootstrap.servers").textValue(), groupId, istest);

            FlinkKafkaConsumer<ConsumerRecord<String, String>> consumer = new FlinkKafkaConsumer<>(
                    Collections.singletonList(sourceTopic), new MyKafkaDeserializationSchema(), properties);
            // Default behavior: read from the consumer group's committed offsets;
            // a brand-new group starts from the latest records.
            consumer.setStartFromGroupOffsets();

            SingleOutputStreamOperator<ConsumerRecord<String, String>> ds1 = env.addSource(consumer)
                    .filter(new FilterFunction<ConsumerRecord<String, String>>() {
                        @Override
                        public boolean filter(ConsumerRecord<String, String> record) throws Exception {
                            // TODO real validity check; currently every record passes.
                            // Debug trace via SLF4J instead of raw System.out prints.
                            log.debug("incoming record topic={} value={}", record.topic(), record.value());
                            return true;
                        }
                    });

            // Side output carrying the flattened JSON destined for Doris.
            OutputTag<String> valueTag = new OutputTag<String>("value-json") {
            };

            SingleOutputStreamOperator<ConsumerRecord<String, String>> processStream = ds1
                    .process(new DorisProcessFunction(type, fields, analysisMap, valueTag));
            processStream.print("value-json");

            DataStream<String> valueJsonStream = processStream.getSideOutput(valueTag);

            // Stream-load properties: the side output emits JSON arrays.
            Properties pro = new Properties();
            pro.setProperty("format", "json");
            pro.setProperty("strip_outer_array", "true");

            // SECURITY NOTE(review): Doris FE endpoints and root credentials are
            // hard-coded here; they should move into the external HDFS config.
            valueJsonStream.addSink(DorisSink.sink(
                    new DorisExecutionOptions.Builder()
                            .setBatchIntervalMs(2000L)
                            .setEnableDelete(false)
                            .setMaxRetries(3)
                            .setStreamLoadProp(pro)
                            .build(),
                    new DorisOptions.Builder()
                            .setFenodes("3.130.142.244:8030,3.130.142.149:8030,3.130.142.171:8030")
                            .setUsername("root")
                            .setPassword("shujuzhongtai@123")
                            .setTableIdentifier(doirsTable)
                            .build()));

            valueJsonStream.print("doris " + doirsTable);
        }
        env.execute("Flink Kafka to Doris Job");
    }

    /**
     * Reads a text file from HDFS and returns its trimmed UTF-8 content.
     *
     * <p>Fixes two defects in the original inline version: it appended the whole
     * 1024-byte buffer each pass (even on a short read, corrupting the tail with
     * stale/zero bytes) and decoded each chunk separately, which could split a
     * multi-byte UTF-8 character across chunk boundaries. The stream was also
     * leaked if a read threw.
     *
     * @param fsUri HDFS filesystem URI, e.g. {@code hdfs://hacluster/}
     * @param path  absolute path of the file inside HDFS
     * @return the file content, decoded as UTF-8 and trimmed
     * @throws Exception if the filesystem cannot be created or the file read fails
     */
    private static String readHdfsConfig(String fsUri, String path) throws Exception {
        HadoopFsFactory hadoopFsFactory = new HadoopFsFactory();
        FileSystem fileSystem = hadoopFsFactory.create(new URI(fsUri));
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try (FSDataInputStream in = fileSystem.open(new Path(path))) {
            byte[] chunk = new byte[1024];
            int n;
            // Append only the bytes actually read on each pass.
            while ((n = in.read(chunk)) != -1) {
                buffer.write(chunk, 0, n);
            }
        }
        // Decode once so multi-byte characters spanning chunks survive intact.
        return new String(buffer.toByteArray(), StandardCharsets.UTF_8).trim();
    }

    /**
     * Builds the Kafka consumer configuration for one stream group.
     *
     * @param bootstrapServers Kafka bootstrap servers (Kerberos-secured Huawei MRS
     *                         Kafka listens on port 21007)
     * @param groupId          consumer group id for this stream group
     * @param istest           when {@code true}, offset auto-commit is disabled so
     *                         records can be replayed during testing
     * @return the populated consumer {@link Properties}
     */
    private static Properties buildKafkaProperties(String bootstrapServers, String groupId, boolean istest) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", bootstrapServers);
        // Principal-based Kerberos authentication.
        properties.put("security.protocol", "SASL_PLAINTEXT");
        properties.put("sasl.kerberos.service.name", "kafka");
        properties.put("auto.offset.reset", "earliest");
        properties.put("group.id", groupId);
        // Max records per poll (broker default is 500).
        properties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 2000);
        // Auto-commit interval when enable.auto.commit is true (default 5000ms).
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        // Allow up to 10 minutes between polls before a rebalance.
        properties.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 600000);
        // Test mode: do not commit offsets automatically; production: do.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, !istest);
        return properties;
    }

    /**
     * Instantiates an {@link IAnalysisHander} implementation by class name via
     * reflection.
     *
     * @param className fully qualified handler class name from the group config
     * @return a new handler instance, or {@code null} if the class cannot be
     *         loaded/instantiated or does not implement {@link IAnalysisHander}
     */
    private static IAnalysisHander getInstance(String className) {
        try {
            // getDeclaredConstructor().newInstance() replaces the deprecated
            // Class.newInstance(), which swallowed checked constructor exceptions.
            Object instance = Class.forName(className).getDeclaredConstructor().newInstance();
            if (instance instanceof IAnalysisHander) {
                return (IAnalysisHander) instance;
            }
            log.warn("Configured class {} does not implement IAnalysisHander", className);
        } catch (Exception ex) {
            // Was silently swallowed; log so misconfigured handler names surface.
            log.warn("Failed to instantiate analysis handler {}", className, ex);
        }
        return null;
    }
}
