package com.cd;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Flink job that captures change-data from a MySQL table via Flink CDC and
 * forwards the raw Debezium JSON change events to a Kafka topic with
 * exactly-once delivery.
 *
 * <p>Pipeline: MySQL (binlog via CDC, initial snapshot first) -> JSON string
 * stream -> Kafka topic "Test".
 */
public class TestApp {

    public static void main(String[] args) {

        // 1. Obtain the streaming execution environment.
//        Configuration conf = new Configuration();
//        // set the web UI port
//        conf.setInteger("rest.port",10000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Read the MySQL dimension data with Flink CDC and create a stream.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                // hostname must be a bare host — the port is supplied separately
                // via .port(); embedding ":30001" here breaks the JDBC connection.
                .hostname("192.168.100.189")
                .port(3306)
                .username("root")
                .password("123456")
                // MySQL database to monitor
                .databaseList("cdp_analytics_point")
                // Tables must be fully qualified as "database.table" (comma-separated
                // for multiple); an unqualified name would match no table.
                .tableList("cdp_analytics_point.point_event_attribute")
                // Startup mode: full snapshot first, then continue from the binlog.
                .startupOptions(StartupOptions.initial())
                // Deserialize change events into Debezium-style JSON strings.
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();
        DataStreamSource<String> mySQLSource = env
                .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
                // set 4 parallel source tasks
                .setParallelism(4);

        // 3. Sink the change stream to Kafka.
        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
                // Kafka bootstrap servers.
                // NOTE(review): this is the same host:port as the MySQL endpoint
                // above — verify the intended broker address.
                .setBootstrapServers("192.168.100.189:30001")
                // Record serializer: plain UTF-8 strings into topic "Test".
                .setRecordSerializer(
                        KafkaRecordSerializationSchema.<String>builder()
                                .setTopic("Test")
                                .setValueSerializationSchema(new SimpleStringSchema())
                                .build()
                )
                // Delivery guarantee for writes to Kafka.
                .setDeliverGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                // EXACTLY_ONCE requires a transactional-id prefix...
                .setTransactionalIdPrefix("zhike-")
                // ...and a transaction timeout no larger than the broker's
                // transaction.max.timeout.ms (default 15 min). Flink's default
                // of 1 hour exceeds that cap and makes the producer fail, so
                // set it explicitly (5 minutes here).
                .setProperty("transaction.timeout.ms", "300000")
                .build();

        mySQLSource.sinkTo(kafkaSink);
        // 4. Execute the job; rewrap the checked exception, preserving the cause.
        try {
            env.execute("mysql2Kafka");
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

    }
}