package com.yifeng.repo.flink.data.transport.bootstrap;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import com.yifeng.repo.flink.data.transport.config.JobParametersConfig;
import com.yifeng.repo.flink.data.transport.config.KafkaConfig;
import com.yifeng.repo.flink.data.transport.streaming.connectors.tidb.sink.WriteToTidbSink;
import com.yifeng.repo.flink.data.transport.utils.kafka.KafkaUtil;

/**
 * Kafka CDC task built on Flink CDC's native capabilities. Supports the
 * different StartupModes: full snapshot + incremental, or incremental-only
 * (earliest / latest / timestamp / specific offsets).
 *
 * @author lijing
 * @since 2023-06-16
 */
public class KafkaFlinkCdcTask {

	/**
	 * Job entry point: builds a Kafka source from the parsed job parameters,
	 * wires it to the TiDB sink, and submits the streaming job.
	 *
	 * @param args command-line arguments parsed into {@link JobParametersConfig}
	 * @throws Exception if job configuration or execution fails
	 */
	public static void main(String[] args) throws Exception {
		// Parse command-line arguments into the job configuration.
		JobParametersConfig config = kafkaBootstrapHelper.setEnvAndParameter(args);

		KafkaConfig kafkaConfig = config.getKafkaConfig();

		// Build the Kafka source; starting offsets are derived from the
		// configured StartupMode (earliest / latest / timestamp / specific offsets).
		KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
	                .setBootstrapServers(kafkaConfig.getBootstrapServers())
	                .setTopics(kafkaConfig.getTopic())
	                .setGroupId(kafkaConfig.getGroupId())
	                .setStartingOffsets(kafkaBootstrapHelper.offsetsInitializer(config))
	                .setValueOnlyDeserializer(new SimpleStringSchema())
	                // Reuse the local kafkaConfig instead of re-reading it from config.
	                .setProperties(KafkaUtil.getConsumerProperties(kafkaConfig))
	                .build();

		StreamExecutionEnvironment env = BootstrapHelper.setEnvironment(config.getStreamExecutionConfig());

		// Records are raw strings; no event-time watermarks are needed downstream.
		DataStreamSource<String> kafkaSourceDataStream = env
				.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), config.getCdcTaskname())
				.setParallelism(config.getSourceParallelism());
		kafkaSourceDataStream
				.addSink(new WriteToTidbSink(config.getSinkDbConfig()))
				.setParallelism(config.getSinkParallelism());

		env.execute(config.getCdcTaskname());
	}

}
