package com.data.capture.config;

import com.alibaba.druid.pool.DruidDataSource;
import com.data.capture.config.properties.CdcProperties;
import com.data.capture.converter.MySqlDateTimeConverter;
import com.data.capture.mq.dto.ChangeDataCaptureInfo;
import com.data.capture.serialize.CustomDebeziumDeserializationSchema;
import com.data.capture.sink.RocketMqSinkFunction;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.source.MySqlSourceBuilder;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import io.debezium.connector.mysql.MySqlConnectorConfig;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.PipelineOptionsInternal;
import org.apache.flink.core.execution.JobClient;
import org.apache.flink.runtime.state.storage.FileSystemCheckpointStorage;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.kerby.util.HexUtil;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.env.MutablePropertySources;
import org.springframework.core.env.PropertySource;
import org.springframework.core.env.StandardEnvironment;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import javax.sql.DataSource;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

/**
 * Flink CDC stream register for MySQL data sources.
 *
 * <p>On startup it reads the {@code data.flink-cdc.sink.*} configuration from the
 * Spring environment, groups the monitored tables by database schema, and starts
 * one asynchronous Flink job per schema that captures MySQL binlog changes and
 * forwards them to RocketMQ. On container shutdown every job is stopped with a
 * savepoint so the binlog position can be restored on the next startup.
 *
 * @author linch
 **/
@Log4j2
@Component
public class MySqlSourceStreamRegister implements DisposableBean {

    /** Root directory for Flink checkpoint files (one sub-directory per database). */
    @Value("${data.flink-cdc.checkpoint-storage:opt/mysql/flink/checkpoints}")
    private String checkpointStorage;
    /** Root directory for Flink savepoint files (one sub-directory per database). */
    @Value("${data.flink-cdc.savepoint-storage:opt/mysql/flink/savepoints}")
    private String savepointStorage;
    /**
     * Running Flink job clients, keyed by database name; used to stop jobs with a
     * savepoint when the bean is destroyed.
     */
    private static final Map<String, JobClient> FLINK_CLIENT_MAP = Maps.newConcurrentMap();
    @Autowired
    private StandardEnvironment environment;
    @Autowired
    private DataSource dataSource;

    /**
     * Stops every running Flink job with a savepoint so each job can resume from
     * its saved binlog position after a restart.
     */
    @Override
    public void destroy() {
        FLINK_CLIENT_MAP.forEach((dsName, client) -> {
            String dir = String.format("%s/%s", savepointStorage, dsName);
            CompletableFuture<String> future = client.stopWithSavepoint(true, dir);
            try {
                // Block the shutdown thread until the savepoint has been written,
                // otherwise the JVM may exit before the state is persisted.
                String target = future.get();
                log.info("保存运行状态保存点，保存地址：{}", target);
            } catch (InterruptedException e) {
                // Restore the interrupt flag instead of swallowing it.
                Thread.currentThread().interrupt();
                log.error("保存点文件保存失败，错误信息：", e);
            } catch (Exception e) {
                log.error("保存点文件保存失败，错误信息：", e);
            }
        });
    }

    /**
     * Reads the sink configuration, groups it by database and launches one Flink
     * job per database schema. A single {@link StreamExecutionEnvironment} can only
     * listen to one schema, hence the per-database split.
     *
     * @throws BeansException never thrown directly; declared for Spring lifecycle symmetry
     */
    @PostConstruct
    public void register() throws BeansException {
        MutablePropertySources sources = environment.getPropertySources();
        Map<String, CdcProperties> mqSinkPropertiesMap = this.getSinkPropertiesMap(sources);
        Map<String, Map<String, CdcProperties>> dbGroup = this.groupByDb(mqSinkPropertiesMap);
        dbGroup.forEach((dbName, propMap) -> {
            MySqlSource<ChangeDataCaptureInfo> source = this.buildMySqlSource((DruidDataSource) dataSource, dbName, propMap);
            if (Objects.isNull(source)) {
                // No tables configured for this database — nothing to monitor.
                return;
            }
            RocketMqSinkFunction sinkFunction = RocketMqSinkFunction.builder().setMqSinkFunction(propMap).build();
            // One environment per schema: a Flink environment can only monitor a single schema.
            StreamExecutionEnvironment env = this.buildStreamExecutionEnvironment(dbName, source, sinkFunction);
            try {
                JobClient client = env.executeAsync();
                FLINK_CLIENT_MAP.put(dbName, client);
                log.info("Flink MySQL CDC 启动完成！db = [{}] JobId = [{}]", dbName, client.getJobID());
            } catch (Exception e) {
                log.error("Flink MySQL CDC 启动失败！db = [{}]", dbName, e);
            }
        });
    }

    /**
     * Builds the Flink execution environment for one database: restores the latest
     * savepoint when one exists, pins a deterministic job id derived from the
     * database name, configures exactly-once file-system checkpointing and wires
     * the CDC source to the RocketMQ sink.
     *
     * @param dbName       database name being monitored
     * @param source       Flink MySQL CDC source
     * @param sinkFunction sink receiving the change-data stream
     * @return configured execution environment, ready to run
     */
    private StreamExecutionEnvironment buildStreamExecutionEnvironment(String dbName, MySqlSource<ChangeDataCaptureInfo> source, SinkFunction<ChangeDataCaptureInfo> sinkFunction) {
        Configuration configuration = new Configuration();
        String savepoint = this.getSavepoint(dbName);
        if (StringUtils.isNotBlank(savepoint)) {
            String dir = String.format("file:///%s/%s/%s", savepointStorage, dbName, savepoint);
            log.info("从运行状态保存点启动，保存点地址：{}", dir);
            configuration.setString("execution.savepoint.path", dir);
        }
        // Flink requires the fixed job id to be a 32-char hex string, so the db name is
        // hex-encoded and zero-padded. UTF-8 is pinned so the id is platform-independent.
        String jobId = String.format("%s00000000000000000000000000000000", HexUtil.bytesToHex(dbName.getBytes(StandardCharsets.UTF_8))).substring(0, 32);
        log.info("Flink MySQL CDC dbName={} jobId={}", dbName, jobId);
        configuration.setString(PipelineOptionsInternal.PIPELINE_FIXED_JOB_ID, jobId);
        // configuration.setInteger("rest.port", 9841);
        // configuration.setString(RestOptions.BIND_PORT, "10000-10100");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Supported modes: EXACTLY_ONCE / AT_LEAST_ONCE.
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Checkpoints are persisted to the local file system, one directory per database.
        String dir = String.format("file:///%s/%s", checkpointStorage, dbName);
        checkpointConfig.setCheckpointStorage(new FileSystemCheckpointStorage(dir));
        // Default savepoint directory:
        // TODO enabling this line throws "TaskExecutorLocalStateStoresManager is already
        //  closed and cannot register a new TaskLocalStateStore" — root cause unknown.
        // env.setDefaultSavepointDirectory("file:///D:/opt/social/flink/checkpoints");
        // Timeout: a checkpoint not finished in time is discarded.
        // checkpointConfig.setCheckpointTimeout(10 * 1000L);
        // checkpointConfig.setMaxConcurrentCheckpoints(2);
        // Minimum pause between the end of one checkpoint and the start of the next.
        // checkpointConfig.setMinPauseBetweenCheckpoints(5 * 1000L);
        // Keep externalized checkpoints (files) when the job is cancelled.
        checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Enable checkpointing with a fixed trigger interval (start-to-start).
        checkpointConfig.setCheckpointInterval(5 * 1000L);
        DataStreamSource<ChangeDataCaptureInfo> streamSource = env.fromSource(source, WatermarkStrategy.noWatermarks(), "MySQLSource");
        streamSource.addSink(sinkFunction).name("RocketMQDisposeSinkFunction");
        return env;
    }

    /**
     * Finds the most recent savepoint for a database by picking the last-modified
     * entry under {@code savepointStorage/dbName}.
     *
     * @param dbName database name
     * @return savepoint directory name, or the empty string when none exists
     */
    private String getSavepoint(String dbName) {
        String dir = String.format("%s/%s", savepointStorage, dbName);
        File file = new File(dir);
        if (!file.exists()) {
            return StringUtils.EMPTY;
        }
        File[] listFiles = file.listFiles();
        if (Objects.isNull(listFiles)) {
            // listFiles() returns null on I/O error or when the path is not a directory.
            return StringUtils.EMPTY;
        }
        Optional<File> max = Arrays.stream(listFiles).max(Comparator.comparing(File::lastModified));
        return max.map(File::getName).orElse(StringUtils.EMPTY);
    }

    /**
     * Builds the Flink MySQL CDC source for one database. Host and port are parsed
     * from the application's Druid JDBC url; the monitored table set is the union
     * of all sink configurations for the database.
     *
     * @param dataSource          application data source (supplies url/credentials)
     * @param database            monitored database name
     * @param mqSinkPropertiesMap key = sink name, value = {@link CdcProperties} MQ sink configuration
     * @return Flink MySQL source, or {@code null} when no tables are configured
     */
    private MySqlSource<ChangeDataCaptureInfo> buildMySqlSource(DruidDataSource dataSource, String database, Map<String, CdcProperties> mqSinkPropertiesMap) {
        // Drop the query string, then extract host and port from "://host:port/db".
        String url = dataSource.getUrl().replaceAll("^(.*)\\?.*", "$1");
        String domain = url.substring(url.lastIndexOf("://"));
        String regex = "^://(.*):(.*)/(.*)$";
        String host = domain.replaceAll(regex, "$1");
        String port = domain.replaceAll(regex, "$2");
        Set<String> tableSet = Sets.newHashSet();
        mqSinkPropertiesMap.forEach((sinkName, value) -> {
            String tableList = value.getTableList();
            tableSet.addAll(Arrays.stream(tableList.split(",")).map(String::trim).collect(Collectors.toSet()));
        });
        log.info("构建MySQL数据源，数据库 = [{}] 数据表 = [{}]", database, String.join(",", tableSet));
        if (CollectionUtils.isEmpty(tableSet)) {
            log.info("监控的数据表为空，不创建Flink MySQL数据源。");
            return null;
        }
        Properties properties = new Properties();
        properties.setProperty("database.ssl.mode", "preferred");
        // Custom date/time conversion.
        properties.setProperty("converters", "dateConverters");
        properties.setProperty("dateConverters.type", MySqlDateTimeConverter.class.getName());
        //properties.setProperty("signal.enabled.channels","source");
        //properties.setProperty("signal.data.collection","WG_YW.debezium_signal");
        // Beta feature: when new tables appear at startup, Debezium re-snapshots them and
        // keeps running. See io.debezium.connector.mysql.MySqlConnectorConfig.SNAPSHOT_NEW_TABLES.
        properties.setProperty("snapshot.new.tables", MySqlConnectorConfig.SnapshotNewTables.PARALLEL.getValue());

        MySqlSourceBuilder<ChangeDataCaptureInfo> builder = MySqlSource.builder();
        return builder.hostname(host)
            .port(Integer.parseInt(port))
            .username(dataSource.getUsername())
            .password(dataSource.getPassword())
            .databaseList(database)
            .scanNewlyAddedTableEnabled(true)
            .tableList(String.join(",", tableSet))
            // An empty string would monitor every table in the schema, including new ones
            // (with a fixed table list, adding a table prevents restart from a savepoint).
            // Without business processing, 131072 new rows are captured within seconds.
            .debeziumProperties(properties)
            // Startup mode: initial snapshot, latest offset, or a specific position.
            .startupOptions(StartupOptions.latest())
            .deserializer(new CustomDebeziumDeserializationSchema())
            .build();
    }

    /**
     * Groups the sink configurations by the database each monitored table belongs
     * to (table entries are {@code db.table}).
     *
     * @param mqSinkPropertiesMap &lt;SinkName, CdcProperties&gt; MQ sink configuration
     * @return MQ sink configuration grouped by database name
     */
    private Map<String, Map<String, CdcProperties>> groupByDb(Map<String, CdcProperties> mqSinkPropertiesMap) {
        Map<String, Map<String, CdcProperties>> dbGroup = Maps.newHashMap();
        mqSinkPropertiesMap.forEach((sinkName, properties) -> {
            if (StringUtils.isBlank(properties.getTopicName()) || StringUtils.isBlank(properties.getTableList())) {
                // Incomplete configuration — skip this sink.
                return;
            }
            log.info("读取到监控配置 topicName = [{}] tableList = [{}]", properties.getTopicName(), properties.getTableList());
            Arrays.stream(properties.getTableList().split(","))
                // Trim entries before grouping; buildMySqlSource trims table names too, and
                // an untrimmed " db" key would otherwise spawn a duplicate job per schema.
                .map(String::trim)
                .collect(Collectors.groupingBy(tableName -> tableName.replaceAll("^(.*)\\..*$", "$1")))
                .forEach((dbName, tableNameList) ->
                    dbGroup.computeIfAbsent(dbName, name -> Maps.newHashMap())
                        .put(sinkName, new CdcProperties(properties, tableNameList)));
        });
        return dbGroup;
    }

    /** Property-key prefix of all CDC sink configuration entries. */
    final String sinkPrefix = "data.flink-cdc.sink";

    /**
     * Collects the MQ sink configurations from every map-backed property source,
     * one {@link CdcProperties} per distinct sink name under {@link #sinkPrefix}.
     *
     * @param sources Spring property sources
     * @return MQ sink configuration, keyed by sink name
     */
    private Map<String, CdcProperties> getSinkPropertiesMap(MutablePropertySources sources) {
        Iterator<PropertySource<?>> iterator = sources.iterator();
        Map<String, CdcProperties> mqSinkPropertiesMap = Maps.newHashMap();
        while (iterator.hasNext()) {
            PropertySource<?> next = iterator.next();
            Object source = next.getSource();
            if (source instanceof Map) {
                Map<String, ?> sourceMap = (Map<String, ?>) source;
                // Keep only the keys that belong to the CDC sink namespace.
                List<String> dataFlinkSink = sourceMap.keySet().stream().filter(key -> key.startsWith(sinkPrefix)).collect(Collectors.toList());

                // Set-based de-duplication: the previous List.contains scan was O(n^2).
                Set<String> dealSink = Sets.newHashSet();
                for (String sinkConfig : dataFlinkSink) {
                    String regex = "^data\\.flink-cdc\\.sink\\.(.*)\\.(.*)$";
                    String sinkName = sinkConfig.replaceAll(regex, "$1");
                    if (!dealSink.add(sinkName)) {
                        // Already populated from another key of the same sink.
                        continue;
                    }
                    CdcProperties properties = mqSinkPropertiesMap.computeIfAbsent(sinkName, name -> new CdcProperties());
                    properties.setTopicName(getSinkProperties(sinkName, "topic-name"));
                    properties.setTableList(getSinkProperties(sinkName, "table-list"));
                    // parseBoolean avoids boxing; null input yields false, same as before.
                    properties.setOnlyPushPrimary(Boolean.parseBoolean(getSinkProperties(sinkName, "only-push-primary")));
                    properties.setIncludeField(getSinkProperties(sinkName, "include-field-list"));
                    properties.setExcludeField(getSinkProperties(sinkName, "exclude-field-list"));
                }
            }
        }
        return mqSinkPropertiesMap;
    }


    /**
     * Resolves one sink property value from the environment.
     *
     * @param sinkName  sink name (segment after the prefix)
     * @param fieldName property field name, e.g. {@code topic-name}
     * @return property value, or {@code null} when absent
     */
    String getSinkProperties(String sinkName, String fieldName) {
        return environment.getProperty(String.format(sinkPrefix + ".%s.%s", sinkName, fieldName));
    }

}
