package com.patsnap.data.npd.dw.etl.job;

import com.google.common.collect.ImmutableMap;
import com.patsnap.data.npd.dw.core.pipeline.AbstractPipeline;
import com.patsnap.data.npd.dw.core.pipeline.PipelineContext;
import com.patsnap.data.npd.dw.etl.base.RateLimitWatermarkGeneratorSupplier;
import com.patsnap.data.npd.dw.etl.serialization.TicdcCanalJsonKafkaRecordDeserializationSchema;
import com.patsnap.one.etl.cdc.CdcRecord;
import com.patsnap.one.etl.flink.job.AbstractFlinkJobLauncher;
import com.patsnap.one.etl.table.dto.SourceTableDto;
import com.patsnap.one.etl.table.meta.definition.SourceTableDefinition;
import com.patsnap.one.etl.table.meta.resolver.SourceTableDefinitionResolver;
import com.patsnap.one.etl.tool.datasource.DataSourceAssembler;
import com.patsnap.one.etl.tool.kafka.KafkaConfig;
import com.patsnap.one.etl.tool.kafka.KafkaHelper;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.consumer.ConsumerConfig;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

import static com.patsnap.one.etl.constant.Constants.LAUNCH_ARGS;
import static com.patsnap.one.etl.constant.Constants.LAUNCH_CLASS;
import static com.patsnap.one.etl.version.Version.EASY_ETL_COMPONENT_VERSION_KEY;
import static com.patsnap.one.etl.version.Version.EASY_ETL_COMPONENT_VERSION_VALUE;

/**
 * Base class for Flink streaming CDC jobs.
 *
 * <p>Wires together the common plumbing — Apollo-merged launch parameters, a rate-limited
 * Kafka source of {@link CdcRecord}s, optional checkpointing and a matching restart
 * strategy — and delegates the actual stream processing to subclasses via
 * {@link #addProcess} and {@link #getPipeline}.
 *
 * @param <T> the pipeline context type produced by the subclass's {@link AbstractPipeline}
 */
@Slf4j
public abstract class AbstractFlinkStreamJob<T extends PipelineContext> extends AbstractFlinkJobLauncher {

    /** Launch args merged with Apollo config; populated at the start of {@link #laucher}. */
    protected ParameterTool parameter;
    /** One side-output tag per source topic; only populated when consuming more than one topic. */
    protected List<OutputTag<CdcRecord>> sourceOutputTags;
    /** Side-output tag for error records; only set when a "target" Kafka config is present. */
    protected OutputTag<CdcRecord> errorCdcOutputTag;

    // NOTE: "laucher" (sic) is the superclass's method name; the typo cannot be fixed here
    // without breaking the AbstractFlinkJobLauncher contract.
    @Override
    protected void laucher(String[] args) throws Exception {
        // Merge Apollo configuration into the CLI arguments.
        // Args look like: --apollo.app.cluster suzhou_office --apollo.app.profile patent_link.ci --apollo.app.id s-data-patent-dw-pipeline
        parameter = mergeApolloConfig(ParameterTool.fromArgs(args));

        // Job name shown in the Flink UI.
        String jobName = parameter.get("job_name");

        StreamExecutionEnvironment env = getExecutionEnvironment(parameter, args, this.getClass().getName());
        // #9
        boolean isCheckpoint = Boolean.parseBoolean(parameter.get("is_checkpoint", "true"));
        if (isCheckpoint) {
            enableCheckpointing(env, parameter);
        }

        // The null argument is intentional — it only pins the generic type, see getPipeline's Javadoc.
        AbstractPipeline<T> pipeline = getPipeline(null);

        SingleOutputStreamOperator<CdcRecord> cdcStream = addSource(env);

        addProcess(cdcStream, pipeline);

        // Without checkpointing a restart could not resume from a consistent state, so fail fast.
        env.setRestartStrategy(isCheckpoint ? RestartStrategies.fixedDelayRestart(3, Time.minutes(5)) : RestartStrategies.noRestart());

        env.execute(jobName);
    }

    /**
     * Merges Apollo config into the launch parameters (via the superclass) and mirrors every
     * resulting entry into JVM system properties so non-Flink components can read them too.
     *
     * @param parameterTool the parameters parsed from the launch arguments
     * @return the merged parameters (also reflected in {@link System#getProperties()})
     */
    @Override
    protected ParameterTool mergeApolloConfig(ParameterTool parameterTool) {
        parameterTool = super.mergeApolloConfig(parameterTool);
        parameterTool.toMap().forEach((key, value) -> System.getProperties().setProperty(key, value));
        return parameterTool;
    }

    /**
     * Builds a watermark strategy that doubles as a consumption rate limiter: the generator
     * throttles the source to at most {@code source_count_per_sec} records per second
     * (default 10000). Timestamps are taken from the wall clock, not from the event payload.
     *
     * @param parameterTool job parameters supplying {@code source_count_per_sec}
     * @return the rate-limiting watermark strategy for the Kafka source
     */
    protected WatermarkStrategy<CdcRecord> getWatermarkStrategy(ParameterTool parameterTool) {
        // Throttle Kafka consumption to this many records per second.
        int sourceCountPerSec = Integer.parseInt(parameterTool.get("source_count_per_sec", "10000"));
        return WatermarkStrategy
                .forGenerator(new RateLimitWatermarkGeneratorSupplier(sourceCountPerSec, 1000))
                .withTimestampAssigner((event, timestamp) -> System.currentTimeMillis());
    }

    /**
     * Builds the TiCDC canal-json deserializer, keyed by table name so records can be
     * resolved to their {@link SourceTableDefinition}.
     *
     * <p>NOTE(review): the entity package "com.patsnap.data.patent.dw.db.entity" is
     * hard-coded here — confirm it is correct for every job that inherits this class.
     */
    protected TicdcCanalJsonKafkaRecordDeserializationSchema getTicdcCanalJsonKafkaRecordDeserializationSchema() {
        Map<Class<? extends SourceTableDto>, SourceTableDefinition> tableDefinitionMap = SourceTableDefinitionResolver.resolve("com.patsnap.data.patent.dw.db.entity");
        Map<String, SourceTableDefinition> sourceTableDefinitionMap = tableDefinitionMap.values().stream()
                .collect(Collectors.toMap(SourceTableDefinition::getTableName, Function.identity()));
        return new TicdcCanalJsonKafkaRecordDeserializationSchema(null, null, null, sourceTableDefinitionMap);
    }

    /**
     * Creates the Kafka CDC source stream from the "source" Kafka config and, as side effects,
     * initializes {@link #sourceOutputTags} (multi-topic case) and {@link #errorCdcOutputTag}
     * (when a "target" Kafka config exists).
     *
     * @param env the execution environment to attach the source to
     * @return the raw {@link CdcRecord} stream, rate-limited by {@link #getWatermarkStrategy}
     */
    protected SingleOutputStreamOperator<CdcRecord> addSource(StreamExecutionEnvironment env) {
        // Instantiate the KafkaSource with the appropriate deserializer.
        Map<String, KafkaConfig> kafkaConfigMap = DataSourceAssembler.buildKafkaConfigsFromParameterTool(parameter);
        KafkaConfig sourceKafkaConfig = kafkaConfigMap.get("source");
        String topics = sourceKafkaConfig.getTopics();
        KafkaSource<CdcRecord> kafkaSource = KafkaHelper.<CdcRecord>builder(sourceKafkaConfig)
                .setDeserializer(getTicdcCanalJsonKafkaRecordDeserializationSchema())
                .setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, parameter.get("is_kafka_auto_commit", "false"))
                .build();
        List<String> topicList = Arrays.stream(topics.split(",")).collect(Collectors.toList());
        if (topicList.size() > 1) {
            // Multiple topics: pre-create one side-output tag per topic so that addProcess
            // implementations can fan the stream out by table name.
            // NOTE(review): assumes topic names have at least three dot-separated segments
            // (the third one is used as the tag id) — confirm against the topic naming
            // convention, otherwise this throws ArrayIndexOutOfBoundsException at startup.
            sourceOutputTags = topicList.stream()
                    .map(topic -> new OutputTag<>(StringUtils.trim(topic).split("\\.")[2], TypeInformation.of(CdcRecord.class)))
                    .collect(Collectors.toList());
        }
        if (kafkaConfigMap.containsKey("target")) {
            // A "target" Kafka config marks where failed records should be routed.
            KafkaConfig targetKafkaConfig = kafkaConfigMap.get("target");
            String topic = targetKafkaConfig.getTopics();
            errorCdcOutputTag = new OutputTag<>(topic, TypeInformation.of(CdcRecord.class));
        }
        return env.fromSource(kafkaSource, getWatermarkStrategy(parameter), sourceKafkaConfig.getTopics());
    }

    /**
     * Attaches the subclass's processing topology to the raw CDC stream.
     *
     * @param cdcStream the rate-limited CDC source stream produced by {@link #addSource}
     * @param pipeline  the pipeline returned by {@link #getPipeline}
     */
    protected abstract void addProcess(SingleOutputStreamOperator<CdcRecord> cdcStream, AbstractPipeline<T> pipeline);

    /**
     * Supplies the pipeline implementation for this job.
     *
     * @param justForLimitContextClass ignored at runtime; exists only so the compiler ties the
     *                                 returned pipeline's context type to {@code T}. Callers pass {@code null}.
     * @return the pipeline to run for this job
     */
    protected abstract AbstractPipeline<T> getPipeline(Class<T> justForLimitContextClass);

    /**
     * Creates the execution environment and records the launch metadata (args, launcher class,
     * component version) in the global job parameters.
     *
     * @param parameterTool merged job parameters
     * @param args          the raw launch arguments, preserved for restarts/diagnostics
     * @param launchClass   fully-qualified name of the launching class
     * @return the configured {@link StreamExecutionEnvironment}
     */
    protected StreamExecutionEnvironment getExecutionEnvironment(ParameterTool parameterTool, String[] args, String launchClass) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        ParameterTool launcherParms = ParameterTool.fromMap(ImmutableMap.of(LAUNCH_ARGS, String.join(" ", args), LAUNCH_CLASS, launchClass, EASY_ETL_COMPONENT_VERSION_KEY, EASY_ETL_COMPONENT_VERSION_VALUE));
        env.getConfig().setGlobalJobParameters(parameterTool.mergeWith(launcherParms));
        // The CDC stream must preserve strict ordering; any parallelism change between operators
        // triggers a rebalance that breaks it, so the intent is a single global parallelism equal
        // to the source's.
        // TODO(review): no setParallelism call is made here despite the comment above — confirm
        // the parallelism is actually pinned elsewhere (e.g. cluster config or launcher).
        return env;
    }

}