package com.yifeng.repo.flink.data.transport.bootstrap;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupMode;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import com.yifeng.repo.flink.data.transport.config.JobParametersConfig;
import com.yifeng.repo.flink.data.transport.config.SourceDbConfig;
import com.yifeng.repo.flink.data.transport.streaming.connectors.kafka.IncreKafkaMysqlSerializationSchema;
import com.yifeng.repo.flink.data.transport.streaming.connectors.mysql.MysqlDebeziumDeserializationSchema;
import com.yifeng.repo.flink.data.transport.utils.id.ObjectId;
import com.yifeng.repo.flink.data.transport.utils.kafka.KafkaUtil;

/**
 * MySQL CDC job built on the stock Flink CDC connector. Supports the full set of
 * {@link StartupMode}s: initial (full snapshot + incremental) as well as
 * incremental-only starts from the earliest/latest offset, a timestamp, or a
 * specific binlog position.
 *
 * @author lijing
 * @since 2023-04-12
 *
 */
public class MysqlFlinkCdcTask {

	public static void main(String[] args) throws Exception {
		// Parse command-line arguments into the job configuration.
		JobParametersConfig jobConfig = BootstrapHelper.setEnvAndParameter(args);

		MySqlSource<String> source = buildSource(jobConfig);

		StreamExecutionEnvironment env =
				BootstrapHelper.setEnvironment(jobConfig.getStreamExecutionConfig());

		DataStreamSource<String> changeStream = env
				.fromSource(source, WatermarkStrategy.noWatermarks(), jobConfig.getCdcTaskname())
				.setParallelism(jobConfig.getSourceParallelism());

		// Transaction id: also used as the sink operator uid and in its display name.
		String transactionId = ObjectId.get().toString();
		changeStream
				.addSink(KafkaUtil.getKafkaBySchema(
						new IncreKafkaMysqlSerializationSchema(
								jobConfig.getKafkaConfig().getPartitionSize(),
								jobConfig.getKafkaConfig().getTopic()),
						transactionId,
						jobConfig.getKafkaConfig()))
				.name(jobConfig.getCdcTaskname() + ":" + transactionId)
				.uid(transactionId)
				.setParallelism(jobConfig.getSinkParallelism());

		env.execute(jobConfig.getCdcTaskname());
	}

	/**
	 * Assembles the MySQL CDC source from the job configuration: connection
	 * details, table/column filters, Debezium properties, startup mode, and
	 * schema-change / newly-added-table options.
	 *
	 * @param jobConfig job parameters holding the source database settings
	 * @return the configured {@link MySqlSource} emitting change records as JSON strings
	 */
	private static MySqlSource<String> buildSource(JobParametersConfig jobConfig) {
		SourceDbConfig db = jobConfig.getSourceDbConfig();
		return MySqlSource.<String>builder()
				.hostname(db.getHostname())
				.port(db.getPort())
				.databaseList(db.getDatabase())
				.tableList(db.getTableList())
				.username(db.getUsername())
				.password(db.getPassword())
				.debeziumProperties(BootstrapHelper.mysqlDebeziumProperties(
						db.getColumnExcludeList(),
						db.getColumnIncludeList(),
						jobConfig.isIncludeSchemaChange()))
				.deserializer(new MysqlDebeziumDeserializationSchema(
						jobConfig.isSchemaChangeToDownstream(), db.getDatabase()))
				.startupOptions(startupOptions(jobConfig))
				.includeSchemaChanges(jobConfig.isIncludeSchemaChange())
				.scanNewlyAddedTableEnabled(jobConfig.isScanNewlyAddedTableEnabled())
				.build();
	}

	/**
	 * Resolves the Flink CDC startup options from the configured startup mode.
	 *
	 * @param config job parameters carrying the startup-mode name and, depending on
	 *               the mode, the binlog file/position or timestamp to start from
	 * @return the matching {@link StartupOptions}
	 * @throws UnsupportedOperationException if the mode has no mapping here
	 * @throws IllegalArgumentException if the configured name is not a {@link StartupMode}
	 */
	public static StartupOptions startupOptions(JobParametersConfig config) {
		StartupMode mode = StartupMode.valueOf(config.getStartupMode());
		if (mode == StartupMode.INITIAL) {
			return StartupOptions.initial();
		}
		if (mode == StartupMode.EARLIEST_OFFSET) {
			return StartupOptions.earliest();
		}
		if (mode == StartupMode.LATEST_OFFSET) {
			return StartupOptions.latest();
		}
		if (mode == StartupMode.SPECIFIC_OFFSETS) {
			return StartupOptions.specificOffset(
					config.getSpecificOffsetFile(), config.getSpecificOffsetPos());
		}
		if (mode == StartupMode.TIMESTAMP) {
			return StartupOptions.timestamp(config.getStartupTimestampMillis());
		}
		throw new UnsupportedOperationException(mode + " mode is not supported.");
	}
}
