package cn.tannn;

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author tn
 * @date 2022-02-09 09:58
 */
public class KafkaToMySQLWitchSQL {

    /**
     * Streams JSON rows from a Kafka topic into a MySQL table using Flink SQL.
     *
     * <p>Pipeline: Kafka source table ({@code kafka_source}) → {@code INSERT INTO}
     * → JDBC/MySQL sink table ({@code mysql_sink}). Checkpointing is enabled so
     * offsets and writes can be recovered after a failure.
     *
     * @param args unused
     * @throws Exception if the Flink environment cannot be initialized
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // For local debugging with the web UI, use instead:
        //   Configuration configuration = new Configuration();
        //   configuration.setString(RestOptions.BIND_PORT, "8081-8089");
        //   StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(configuration);
        //   env.getCheckpointConfig().setCheckpointStorage(new FileSystemCheckpointStorage("file:///F://project//java//flink-cdc-demo-1//doc"));

        // Trigger a checkpoint every 3000 ms (checkpoint interval).
        env.enableCheckpointing(3000);
        // Advanced checkpoint options:
        // Use exactly-once checkpointing semantics (this is the default mode).
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Require at least 1500 ms between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(1500);
        // A checkpoint must complete within 60 s or it is discarded (checkpoint timeout).
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        // Allow only one checkpoint in flight at a time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // RETAIN_ON_CANCELLATION keeps checkpoint data after the job is cancelled, so the
        // job can later be restored from a chosen checkpoint.
        // (DELETE_ON_CANCELLATION would delete it on cancel and retain it only when the
        // job fails.)
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setParallelism(1);

        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);

        // 'scan.startup.mode' alternatives: latest-offset | earliest-offset
        String sourceDDL = "CREATE TABLE kafka_source ( " +
                "    id int,  " +
                "    name STRING, " +
                "    sex STRING " +
                ") WITH ( " +
                "  'connector' = 'kafka', " +
                "  'topic' = 'flink-cdc', " +
                "  'properties.bootstrap.servers' = '192.168.0.51:9092', " +
                "  'properties.group.id' = 'test-consumer-group', " +
                "  'scan.startup.mode' = 'latest-offset', " +
                "  'value.format' = 'json', " +
                "  'value.json.fail-on-missing-field' = 'false', " +
                "  'value.fields-include' = 'ALL' " +
                ")";

        // PRIMARY KEY ... NOT ENFORCED lets the JDBC sink run in upsert mode on 'id'.
        String sinkDDL = "CREATE TABLE mysql_sink (  " +
                "     id INT,    " +
                "     name STRING,     " +
                "     sex STRING, " +
                "     primary key (id) not enforced  " +
                ") WITH (  " +
                " 'connector' = 'jdbc',  " +
                " 'driver' = 'com.mysql.cj.jdbc.Driver',  " +
                " 'url' = 'jdbc:mysql://192.168.0.51:3316/flink?serverTimezone=Asia/Shanghai&useSSL=false', " +
                " 'username' = 'root',  " +
                " 'password' = 'root',  " +
                " 'table-name' = 'mysql_sink'  " +
                ")";

        String transformDmlSQL = "insert into mysql_sink select * from kafka_source";
        tableEnv.executeSql(sourceDDL);
        tableEnv.executeSql(sinkDDL);
        // executeSql on an INSERT submits the job asynchronously by itself;
        // env.execute() is not needed for a pure Table API/SQL job.
        tableEnv.executeSql(transformDmlSQL);
    }
}
