package com.apps.sdses.flink141.cdc;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @author jiwei
 * @description
 * @date 2023/7/17 9:08
 */
public class MySqlSourceExample {
    public static void main(String[] args) throws Exception {
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("192.168.102.155")
                .port(33066)
                .databaseList("db_datacube_test") // 设置捕获的数据库， 如果需要同步整个数据库，请将 tableList 设置为 ".*".
                .tableList("db_datacube_test.water_sensor_view") // 设置捕获的表
                .username("datacube_test")
                .password("Gbd#XERA@19d1ZtD")
                .startupOptions(StartupOptions.initial())
                .deserializer(new JsonDebeziumDeserializationSchema(false))
                .build();

        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        env.setParallelism(1);

        // 设置 3s 的 checkpoint 间隔
        env.enableCheckpointing(3000);


        env
                .fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
                // 设置 source 节点的并行度为 4
                .setParallelism(1)
                .print().setParallelism(1); // 设置 sink 节点并行度为 1


        //Kafka Sink
//        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
//                .setBootstrapServers("192.168.1.2:9092")
//                .setRecordSerializer(
//                        KafkaRecordSerializationSchema.builder()
//                        .setTopic("drity_data_sink")
//                        .setValueSerializationSchema(new SimpleStringSchema())
//                        .build()
//                )
//                .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
//                .build();
//
//        env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
//                .sinkTo(kafkaSink);

//        SingleOutputStreamOperator<String> processDataStream = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source")
//                .process(new ProcessFunction<String, String>() {
//                    private transient Gson gson;
//
//                    @Override
//                    public void open(Configuration parameters) throws Exception {
//                        gson = new Gson();
//                    }
//
//                    @Override
//                    public void processElement(String value, Context ctx, Collector<String> out) throws Exception {
//                        JsonObject jsonObject = gson.fromJson(value, JsonObject.class);
//                        String tableName = jsonObject.get("source").getAsJsonObject().get("table").getAsString();
//                        OutputTag<String> outputTag = new OutputTag<String>("record-" + tableName){};
//                        ctx.output(outputTag, value);
//                    }
//                });
//
//        OutputTag<String> outputTag = new OutputTag<String>("record-orders"){};
//        DataStream<String> orderDataStream = processDataStream.getSideOutput(outputTag);
//        DataStream<String> productsDataStream = processDataStream.getSideOutput(new OutputTag<String>("record-products"){});
//
//
//        orderDataStream.print("=============orders:");
//        productsDataStream.print("=============products:");

        env.execute("Print MySQL Snapshot + Binlog");

    }
}
