package com.cd;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.source.utils.TableDiscoveryUtils;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import io.debezium.ddl.parser.mysql.generated.MySqlParser;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;


/**
 * Flink 1.14.4 deployment notes:
 * yarn.provided.lib.dirs must point at the Flink 1.14.4 lib directory on HDFS.
 * On HDFS keep only one connector jar: flink-sql-connector-mysql-cdc-2.4.0.jar.
 * Submit with:
 * ./flink run-application -t yarn-application --detached -Dyarn.application.name="mysqlCDC2kafka"  \
 * -Dyarn.queue.name=root -Dyarn.provided.lib.dirs="hdfs://changdu/flink-jar/"  -yjm 1024m -ytm 1024m -ynm mysqlcdc2kafka -c com.cd.MysqlCDC2Kafka mysql2kafks.jar
 *
 */
public class MysqlCDC2Kafka {

    /**
     * Entry point: builds a MySQL CDC source capturing
     * {@code cdp_analytics_point.point_event_attribute}, wires it to an
     * exactly-once Kafka sink and executes the streaming job.
     *
     * @param args unused
     * @throws RuntimeException if job submission/execution fails
     */
    public static void main(String[] args) {
        // Disable SSL on the CDC JDBC connection (the target MySQL has no certificates).
        Properties jdbcProperties = new Properties();
        jdbcProperties.put("jdbc.properties.useSSL", "false");
        jdbcProperties.put("useSSL", "false");

        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("192.168.100.160")
                .port(3306)
                .databaseList("cdp_analytics_point") // set captured database
                .tableList("cdp_analytics_point.point_event_attribute") // set captured table
                .jdbcProperties(jdbcProperties)
                .username("root")
                .password("123456")
                .includeSchemaChanges(true)
                // initial(): snapshot the existing table first, then stream the binlog
                .startupOptions(StartupOptions.initial())
                .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
                .build();

        Configuration configuration = new Configuration();
        // Resolve classes parent-first so the CDC connector shares Flink's own classloader.
        configuration.setString("classloader.resolve-order", "parent-first");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);

        // Checkpoint every 3s with exactly-once semantics; retain externalized
        // checkpoints on cancellation so the job can be restored manually.
        env.enableCheckpointing(3000);
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        env
                .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
                // set 4 parallel source tasks
                .setParallelism(4)
                .sinkTo(getFlinkProducer())
                .setParallelism(1); // use parallelism 1 for sink to keep message ordering

        try {
            env.execute("mysqlCDC2kafka");
        } catch (Exception e) {
            throw new RuntimeException("Flink job mysqlCDC2kafka failed", e);
        }
    }

    /**
     * Builds the exactly-once {@link KafkaSink} that writes the JSON change
     * events to the {@code Test} topic.
     *
     * @return configured Kafka sink for String records
     */
    private static KafkaSink<String> getFlinkProducer() {
        String kafkaServer = "192.168.100.189:30001";
        String topic = "Test";
        return KafkaSink.<String>builder()
                .setBootstrapServers(kafkaServer)
                .setKafkaProducerConfig(getKafkaProperties2())
                .setDeliverGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
                        .setTopic(topic)
                        .setValueSerializationSchema(new SimpleStringSchema())
                        .build()
                )
                // A transactional.id prefix is mandatory for EXACTLY_ONCE delivery.
                .setTransactionalIdPrefix("flink-kafka-")
                .build();
    }

    /**
     * Legacy {@link FlinkKafkaProducer} sink, kept for reference; the job
     * uses {@link #getFlinkProducer()} instead.
     *
     * @return at-least-once Kafka producer sink for String records
     */
    public static FlinkKafkaProducer<String> getKafkaProducer() {
        String kafkaServer = "192.168.100.189:30001";
        String topic = "Test";
        return new FlinkKafkaProducer<>(kafkaServer, topic, new SimpleStringSchema());
    }

    /**
     * Kafka producer tuning passed into the sink builder.
     *
     * @return producer configuration properties
     */
    private static Properties getKafkaProperties2() {
        Properties properties = new Properties();
        // Must not exceed the broker's transaction.max.timeout.ms (15 min by default).
        properties.setProperty("transaction.timeout.ms", "60000");
        properties.setProperty("acks", "all");
        properties.setProperty("retries", "3");
        properties.setProperty("linger.ms", "1");
        properties.setProperty("batch.size", "16384");
        properties.setProperty("buffer.memory", "33554432");
        // NOTE: no key/value serializers here — KafkaSink installs ByteArraySerializer
        // itself and overrides (with a warning) any serializer configured in this map.
        return properties;
    }
}
