package com.ygx.flink.practice.sql;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.PipelineOptions;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author YangGuoXiang
 * @version 1.0
 * @date 2022/3/31 11:53
 */
/**
 * Flink SQL job: reads user-behavior events from a Kafka topic, aggregates
 * page views (pv) and unique visitors (uv) per hour, and upserts the result
 * into a MySQL table via the JDBC connector.
 */
public class KafkaToMysql {

    /**
     * Entry point. Builds the streaming/table environments, enables exactly-once
     * checkpointing, registers the Kafka source and MySQL sink tables, and submits
     * the continuous INSERT query.
     *
     * <p>Note: {@code tableEnv.executeSql(...)} on an INSERT statement submits the
     * job itself, so no trailing {@code env.execute()} call is required.
     *
     * @param args unused
     * @throws Exception if environment setup or job submission fails
     */
    public static void main(String[] args) throws Exception {

        // Environment preparation: streaming env + Blink-planner table env.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings bsSettings =
                EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, bsSettings);
        Configuration config = tableEnv.getConfig().getConfiguration();
        config.setString(PipelineOptions.NAME, "SqlKafkaToMysql");

        configureCheckpointing(env);
        createKafkaSourceTable(tableEnv);
        createMysqlSinkTable(tableEnv);
        submitHourlyPvUvInsert(tableEnv);
    }

    /**
     * Enables and tunes exactly-once checkpointing: 1 s interval, 500 ms minimum
     * pause, 60 s timeout, at most 2 tolerated consecutive failures, one concurrent
     * checkpoint, externalized checkpoints retained on cancellation, unaligned
     * checkpoints, and HDFS checkpoint storage.
     */
    private static void configureCheckpointing(StreamExecutionEnvironment env) {
        // Start a checkpoint every 1000 ms.
        env.enableCheckpointing(1000);

        // Set mode to exactly-once (this is the default).
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Make sure 500 ms of progress happen between checkpoints.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);

        // Checkpoints have to complete within one minute, or are discarded.
        env.getCheckpointConfig().setCheckpointTimeout(60000);

        // Only two consecutive checkpoint failures are tolerated.
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(2);

        // Allow only one checkpoint to be in progress at the same time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);

        // Retain externalized checkpoints after job cancellation so the job
        // can be restored from them manually.
        env.getCheckpointConfig()
                .setExternalizedCheckpointCleanup(
                        CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Enable unaligned checkpoints to reduce checkpoint time under backpressure.
        env.getCheckpointConfig().enableUnalignedCheckpoints();

        // Checkpoint snapshots are written to HDFS.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://cdh01:8020/flink/checkpoint/SqlKafkaToMysql");
    }

    /** Registers the Kafka-backed input table of raw user-behavior JSON events. */
    private static void createKafkaSourceTable(StreamTableEnvironment tableEnv) {
        tableEnv.executeSql(
                "CREATE TABLE user_log (" +
                        "    user_id VARCHAR," +
                        "    item_id VARCHAR," +
                        "    category_id VARCHAR," +
                        "    behavior VARCHAR," +
                        "    ts TIMESTAMP_LTZ(3)" +
                        ") WITH (" +
                        "    'connector' = 'kafka'," +
                        "    'topic' = 'user_behavior'," +
                        "    'scan.startup.mode' = 'group-offsets'," +
                        "    'properties.bootstrap.servers' = 'cdh04:9092,cdh05:9092,cdh06:9092'," +
                        "    'properties.group.id' = 'pvuv_sql'," +
                        "    'format' = 'json'," +
                        "    'json.ignore-parse-errors' = 'true'," +
                        "    'json.timestamp-format.standard' = 'ISO-8601'" +
                        ")");
    }

    /**
     * Registers the MySQL JDBC output table.
     *
     * <p>JDBC sink buffering:
     * <ul>
     *   <li>{@code sink.buffer-flush.max-rows} (default 100): max buffered records
     *       before a flush; '0' disables the row-count trigger.</li>
     *   <li>{@code sink.buffer-flush.interval} (default 1s): interval after which an
     *       async thread flushes buffered data; '0' disables the timed trigger.</li>
     * </ul>
     * To flush buffered events fully asynchronously, set
     * {@code 'sink.buffer-flush.max-rows' = '0'} and configure a suitable flush
     * interval — which is what is done here.
     *
     * <p>NOTE(review): credentials are hard-coded in plaintext; move them to
     * configuration/secret management before production use.
     */
    private static void createMysqlSinkTable(StreamTableEnvironment tableEnv) {
        tableEnv.executeSql("" +
                "CREATE TABLE pvuv_sink (" +
                "    dt VARCHAR," +
                "    pv BIGINT," +
                "    uv BIGINT," +
                "    PRIMARY KEY (dt) NOT ENFORCED" +
                ") WITH (" +
                "    'connector' = 'jdbc'," +
                "    'url' = 'jdbc:mysql://192.168.5.9:3306/flink-test?useSSL=false&serverTimezone=UTC'," +
                "    'username' = 'root'," +
                "    'password' = '123456'," +
                "    'table-name' = 'pvuv_sink_sql'," +
                "    'sink.buffer-flush.max-rows' = '0'," +
                "    'sink.buffer-flush.interval' = '1s'" +
                ")");
    }

    /**
     * Submits the continuous query: per-hour pv (total events) and uv (distinct
     * users), upserted into the sink keyed by the formatted hour bucket.
     */
    private static void submitHourlyPvUvInsert(StreamTableEnvironment tableEnv) {
        tableEnv.executeSql("" +
                "INSERT INTO pvuv_sink" +
                " SELECT" +
                "  DATE_FORMAT(ts, 'yyyy-MM-dd HH:00') dt," +
                "  COUNT(*) AS pv," +
                "  COUNT(DISTINCT user_id) AS uv" +
                " FROM user_log" +
                " GROUP BY DATE_FORMAT(ts, 'yyyy-MM-dd HH:00')");
    }
}
