package com.yifeng.repo.flink.data.transport.bootstrap;

import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import com.yifeng.repo.flink.data.transport.config.JobParametersConfig;
import com.yifeng.repo.flink.data.transport.config.SourceDbConfig;
import com.yifeng.repo.flink.data.transport.streaming.connectors.kafka.FullKafkaSerializationSchema;
import com.yifeng.repo.flink.data.transport.streaming.connectors.mysql.MySqlSource;
import com.yifeng.repo.flink.data.transport.streaming.functions.FullSourceFunction;
import com.yifeng.repo.flink.data.transport.utils.id.ObjectId;
import com.yifeng.repo.flink.data.transport.utils.kafka.KafkaUtil;

/**
 * MySQL full-volume streaming read task.
 *
 * <p>Builds a full-read MySQL source from the job configuration, streams the
 * rows through Flink, and publishes them to Kafka. Every record of one run is
 * tagged with a freshly generated transaction id, which also serves as the
 * sink operator's uid and as part of its display name.
 *
 * @author lijing
 * @since 2023-04-12
 */
public class MysqlFullStreamingReadTask {

	/**
	 * Job entry point.
	 *
	 * @param args command-line arguments, parsed into a
	 *             {@link JobParametersConfig} by {@link BootstrapHelper}
	 * @throws Exception if the job cannot be built or if Flink execution fails;
	 *                   propagated so the launcher sees a non-zero exit status
	 */
	public static void main(String[] args) throws Exception {
		// Parse CLI arguments and initialise environment-dependent settings.
		JobParametersConfig config = BootstrapHelper.setEnvAndParameter(args);

		// Configure the full-read MySQL source from the connection settings.
		SourceDbConfig sourceDbConfig = config.getSourceDbConfig();
		FullSourceFunction sourceFunction = MySqlSource.builder()
				.hostname(sourceDbConfig.getHostname())
				.port(sourceDbConfig.getPort())
				.database(sourceDbConfig.getDatabase())
				.tableList(sourceDbConfig.getTableList())
				.username(sourceDbConfig.getUsername())
				.password(sourceDbConfig.getPassword())
				.fetchSize(sourceDbConfig.getFetchSize())
				.sqlCondition(BootstrapHelper.sqlMap(sourceDbConfig.getSqlMap()))
				.parallelism(sourceDbConfig.getParallelism())
				.build();

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// NOTE(review): raw DataStreamSource kept because the element type of
		// FullSourceFunction is not visible from this file — parameterize once known.
		DataStreamSource dataStream = env.addSource(sourceFunction)
				.setParallelism(config.getSourceParallelism());

		// Generate a transaction id; it tags this run's records and is reused
		// as the sink operator uid so the operator identity is stable.
		String transactionId = ObjectId.get().toString();

		FullKafkaSerializationSchema schema = new FullKafkaSerializationSchema(
				config.getKafkaConfig().isPartitionByHash(),
				config.getKafkaConfig().getPartitionSize(),
				config.getKafkaConfig().getTopic());
		dataStream
				.addSink(KafkaUtil.getKafkaBySchema(schema, transactionId, config.getKafkaConfig()))
				.name(config.getCdcTaskname() + ":" + transactionId)
				.uid(transactionId)
				.setParallelism(config.getSinkParallelism());

		// Let failures propagate (main declares throws Exception) instead of
		// swallowing them with printStackTrace(): the original try/catch made a
		// failed job terminate as if it had succeeded, hiding errors from the
		// launcher and from any scheduler monitoring the exit status.
		env.execute(config.getCdcTaskname());
	}

}
