package com.sl.cdc.modular.debezium;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.core.util.StrUtil;
import com.sl.cdc.api.CdcConfigApi;
import com.sl.cdc.api.domain.TaskDataBaseInfo;
import com.sl.cdc.api.domain.TaskDataSourceInfo;
import com.sl.cdc.api.domain.TaskInfo;
import com.sl.cdc.api.domain.TaskTableInfo;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import lombok.SneakyThrows;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.AbstractID;

import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;


/**
 * Manages Flink MySQL-CDC tasks: builds CDC sources from {@link TaskInfo}
 * definitions, wires them to a shared custom sink, and starts/stops the
 * resulting Flink job.
 *
 * <p>Not thread-safe; the shared {@link StreamExecutionEnvironment} is mutated
 * by {@link #addTask(TaskInfo)} without synchronization.
 */
public class CdcFlinkManager implements CdcConfigApi {

    /** Shared Flink execution environment; every registered CDC source runs in it. */
    private final StreamExecutionEnvironment env;

    /** Single sink instance that every CDC source stream is attached to. */
    private final CdcFlinkSink customSink;

    /**
     * Builds a local Flink environment with the embedded web UI on port 9090
     * (served under the "/flink" URL prefix), a 6-second checkpoint interval
     * and a default parallelism of 2, and prepares the shared custom sink.
     */
    public CdcFlinkManager() {
        Configuration config = new Configuration();
        // Embedded Flink web UI: http://<host>:9090/flink
        config.set(RestOptions.PORT, 9090);
        config.set(RestOptions.URL_PREFIX, "/flink");
        env = StreamExecutionEnvironment.getExecutionEnvironment(config);
        // Checkpoint every 6 seconds.
        env.enableCheckpointing(6000);
        // Default parallelism for all operators, sources included.
        env.setParallelism(2);
        customSink = new CdcFlinkSink();
    }

    /**
     * Registers a MySQL CDC source described by {@code taskInfo} in the shared
     * environment and attaches it to the custom sink. The job is not started
     * here; call {@link #start(String)} afterwards.
     *
     * @param taskInfo task definition carrying the source connection info,
     *                 optional extra Debezium properties, and the task id
     * @return the task id, reused as the Flink source name
     */
    @SneakyThrows
    @Override
    public String addTask(TaskInfo taskInfo) {
        TaskDataSourceInfo sourceInfo = taskInfo.getSourceInfo();
        String id = taskInfo.getId();

        // Copy the caller-supplied Debezium properties so the incoming TaskInfo
        // is never mutated (and a null Properties does not NPE).
        Properties debeziumProperties = new Properties();
        if (ObjectUtil.isNotNull(taskInfo.getProperties())) {
            debeziumProperties.putAll(taskInfo.getProperties());
        }
        // FIX: "snapshot.locking.mode" is a Debezium connector option, not a
        // JDBC URL parameter. It was previously placed (ineffectively) into
        // jdbcProperties under a "debezium." prefix. Do not lock tables while
        // scanning historical data during the initial snapshot, unless the
        // caller configured the locking mode explicitly.
        debeziumProperties.putIfAbsent("snapshot.locking.mode", "none");

        Properties jdbcProperties = new Properties();
        jdbcProperties.setProperty("useSSL", "false");

        MySqlSource<String> source = MySqlSource.<String>builder()
                .debeziumProperties(debeziumProperties)
                .hostname(sourceInfo.getHostname())
                .port(Integer.valueOf(sourceInfo.getPort()))
                // Multiple databases may be monitored.
                .databaseList(buildDatabaseIncludeList(sourceInfo).toArray(new String[0]))
                // Multiple tables may be monitored, as "db.table" patterns.
                .tableList(buildTableIncludeList(sourceInfo).toArray(new String[0]))
                .username(sourceInfo.getUser())
                .password(sourceInfo.getPassword())
                .jdbcProperties(jdbcProperties)
                // Also emit schema-change (DDL) events.
                .includeSchemaChanges(true)
                // JSON deserialization, including the message schema.
                .deserializer(new JsonDebeziumDeserializationSchema(true))
                // Startup modes:
                //  initial (default): snapshot the monitored tables on first
                //    start, then continue reading the latest binlog.
                //  earliest-offset: skip the snapshot, read from the earliest
                //    available binlog position.
                //  latest-offset: skip the snapshot, read only changes made
                //    after the connector starts.
                //  specific-offset: skip the snapshot, start from a given binlog
                //    file/position (or a GTID set when GTIDs are enabled).
                //  timestamp: skip the snapshot, start from binlog events at a
                //    given timestamp.
                .startupOptions(StartupOptions.initial())
                .build();

        DataStreamSource<String> stream =
                env.fromSource(source, WatermarkStrategy.noWatermarks(), id);
        stream.addSink(customSink);
        return id;
    }

    /**
     * Builds the "database.table" include patterns for every table configured
     * on the source.
     *
     * @param sourceInfo source definition; may be {@code null}
     * @return distinct "db.table" entries; empty when nothing is configured
     */
    private List<String> buildTableIncludeList(TaskDataSourceInfo sourceInfo) {
        // FIX: return an empty list instead of null — callers immediately call
        // toArray() on the result, which would throw a NullPointerException.
        if (ObjectUtil.isNull(sourceInfo) || CollUtil.isEmpty(sourceInfo.getDataBaseList())) {
            return List.of();
        }
        return sourceInfo.getDataBaseList().stream()
                .filter(db -> CollUtil.isNotEmpty(db.getTableList()))
                .flatMap(db -> db.getTableList().stream()
                        .map(t -> StrUtil.concat(true, db.getCode(), "." + t.getCode())))
                .distinct()
                .toList();
    }

    /**
     * Builds the list of database names (codes) to monitor.
     *
     * @param sourceInfo source definition; may be {@code null}
     * @return database codes; empty when nothing is configured
     */
    private List<String> buildDatabaseIncludeList(TaskDataSourceInfo sourceInfo) {
        // FIX: return an empty list instead of null — callers immediately call
        // toArray() on the result, which would throw a NullPointerException.
        if (ObjectUtil.isNull(sourceInfo) || CollUtil.isEmpty(sourceInfo.getDataBaseList())) {
            return List.of();
        }
        return sourceInfo.getDataBaseList().stream()
                .map(TaskDataBaseInfo::getCode)
                .toList();
    }

    /** Starts all registered sources as a single unnamed Flink job. */
    @Override
    public Boolean start() {
        return this.start(null);
    }

    /**
     * Returns the status of the given task.
     * TODO: not implemented yet — always returns an empty string.
     */
    @Override
    public String getStatus(String id) {
        return "";
    }

    /**
     * Executes the pipeline built so far, using {@code id} as the job name.
     * Note: {@code execute} blocks until the job terminates.
     */
    @SneakyThrows
    @Override
    public Boolean start(String id) {
        env.execute(id);
        return Boolean.TRUE;
    }

    @Override
    public Boolean close() {
        return this.close(null);
    }

    /**
     * NOTE(review): this does not actually stop the job identified by
     * {@code id} — it invalidates a cluster dataset for a freshly generated
     * random JobID and ignores the argument entirely. Cancelling a running job
     * requires a ClusterClient / REST cancel call; confirm the intended
     * semantics before relying on this. Behavior preserved as-is.
     */
    @SneakyThrows
    @Override
    public Boolean close(String id) {
        AbstractID abstractID = new JobID();
        env.invalidateClusterDataset(abstractID);
        return Boolean.TRUE;
    }

    /** Removes a task by delegating to {@link #close(String)}. */
    @Override
    public Boolean remove(String id) {
        this.close(id);
        return Boolean.TRUE;
    }
}
