package com.flink.cdc;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;

import java.util.Arrays;
import java.util.Properties;

/**
 * Flink job that captures MySQL change-data (via the Ververica CDC connector)
 * and forwards the Debezium JSON records to a Kafka topic.
 *
 * <p>All configuration is supplied as command-line arguments and parsed with
 * {@link ParameterTool}; required keys fail fast with a descriptive message
 * instead of surfacing as a {@code NullPointerException} later on.
 */
public class FlinkCdc {

    private static final String DEFAULT_SOURCE_PARALLELISM = "1";
    private static final String DEFAULT_SINK_PARALLELISM = "1";
    private static final String DEFAULT_FLINK_CHECK_POINTING = "3000";
    private static final String NULL_CONFIG = "null";
    // Checkpoint hard timeout: 2 hours in milliseconds.
    private static final long CHECKPOINT_TIMEOUT_MS = 7200000L;
    // Max Kafka request size in bytes (2 MiB) — CDC rows can exceed the 1 MiB default.
    private static final String KAFKA_MAX_REQUEST_SIZE_BYTES = "2097152";

    public static void main(String[] args) throws Exception {
        ParameterTool parameter = ParameterTool.fromArgs(args);

        // MySQL source configuration. getRequired/getInt throw a clear
        // "No data for required key" error when a parameter is missing.
        String sourceHostname = parameter.getRequired("source.hostname");
        int sourcePort = parameter.getInt("source.port");
        String sourceUsername = parameter.getRequired("source.username");
        String sourcePassword = parameter.getRequired("source.password");

        // Database / table selection. Table names may be given unqualified;
        // when exactly one database is configured they are qualified as
        // "db.table" because the CDC connector matches fully-qualified names.
        String[] databaseList = parameter.getRequired("source.databaseList").split(",");
        String[] tableList = Arrays.stream(parameter.get("source.tableList", "").split(","))
                .map(table -> databaseList.length == 1 && table.length() > 0 ? databaseList[0] + '.' + table : table)
                .toArray(String[]::new);

        // Kafka sink configuration.
        String kafkaHost = parameter.getRequired("kafka.host");
        String topicId = parameter.getRequired("kafka.topic");

        // Flink runtime configuration.
        long checkPointing = Long.parseLong(parameter.get("flink.checkPointing", DEFAULT_FLINK_CHECK_POINTING));
        int sourceParallelism = Integer.parseInt(parameter.get("source.parallelism", DEFAULT_SOURCE_PARALLELISM));
        int sinkParallelism = Integer.parseInt(parameter.get("sink.parallelism", DEFAULT_SINK_PARALLELISM));

        // serverId: required; expanded to a range when the source is parallel
        // so every reader gets a distinct MySQL server id.
        String serverId = resolveServerId(parameter, sourceParallelism);

        String jobName = "数据迁移serverId:" + serverId + ";库:" + parameter.get("source.databaseList") +
                ";表:" + parameter.get("source.tableList", "all");

        // Debezium deserialization tuning for the source -> Flink boundary:
        // decimals as doubles, unsigned BIGINT as long (instead of byte arrays /
        // BigDecimal strings in the emitted JSON).
        Properties debeziumProperties = new Properties();
        debeziumProperties.setProperty("decimal.handling.mode", "double");
        debeziumProperties.setProperty("bigint.unsigned.handling.mode", "long");

        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname(sourceHostname)
                .port(sourcePort)
                // set captured databases
                .databaseList(databaseList)
                // set captured tables
                .tableList(tableList)
                .username(sourceUsername)
                .password(sourcePassword)
                .serverId(serverId)
                .debeziumProperties(debeziumProperties)
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Enable checkpointing; retain externalized checkpoints on cancellation
        // so the job can be restored manually after a cancel.
        env.enableCheckpointing(checkPointing);
        env.getCheckpointConfig().enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.getCheckpointConfig().setCheckpointTimeout(CHECKPOINT_TIMEOUT_MS);
        // Per-serverId checkpoint directory keeps concurrent jobs from colliding.
        env.getCheckpointConfig().setCheckpointStorage("file:///flink-checkpoints/" + serverId);

        FlinkKafkaProducer<String> flinkKafkaProducer = buildKafkaProducer(kafkaHost, topicId);

        env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
                .setParallelism(sourceParallelism)
                .addSink(flinkKafkaProducer)
                .name(jobName)
                .setParallelism(sinkParallelism);
        env.execute(jobName);
    }

    /**
     * Reads and validates the {@code serverId} argument.
     *
     * <p>When the source runs with parallelism {@code > 1}, the single numeric
     * id is expanded to the inclusive range {@code "start-(start+parallelism-1)"}
     * required by the MySQL CDC connector (one distinct server id per reader).
     *
     * @param parameter parsed command-line arguments
     * @param sourceParallelism configured source parallelism
     * @return the server id, possibly expanded to a range
     * @throws IllegalArgumentException if {@code serverId} is missing, or a
     *         range/non-numeric value is combined with parallelism {@code > 1}
     */
    private static String resolveServerId(ParameterTool parameter, int sourceParallelism) {
        String serverId = parameter.get("serverId", NULL_CONFIG);
        if (NULL_CONFIG.equals(serverId)) {
            throw new IllegalArgumentException("serverId 必须要设置");
        }
        if (sourceParallelism > 1) {
            final int startId;
            try {
                startId = Integer.parseInt(serverId);
            } catch (NumberFormatException e) {
                // Previously this surfaced as a bare NumberFormatException when
                // the user passed a range (e.g. "5400-5410") together with
                // source.parallelism > 1 — fail with an actionable message.
                throw new IllegalArgumentException(
                        "serverId must be a single numeric id when source.parallelism > 1, got: " + serverId, e);
            }
            serverId = serverId + "-" + (startId + sourceParallelism - 1);
        }
        return serverId;
    }

    /**
     * Builds the Kafka sink with at-least-once semantics and an enlarged
     * max request size so oversized CDC rows are not rejected by the producer.
     *
     * @param kafkaHost Kafka bootstrap servers
     * @param topicId destination topic
     * @return configured {@link FlinkKafkaProducer}
     */
    private static FlinkKafkaProducer<String> buildKafkaProducer(String kafkaHost, String topicId) {
        Properties producerProperties = new Properties();
        producerProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaHost);
        producerProperties.setProperty(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, KAFKA_MAX_REQUEST_SIZE_BYTES);
        return new FlinkKafkaProducer<>(
                topicId,
                new SimpleStringSerializationSchemaWrapper(false),
                producerProperties,
                FlinkKafkaProducer.Semantic.AT_LEAST_ONCE);
    }
}
