package cn.itcast.flink.connector;

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

/**
 * Consumes data from a Kafka topic and, using Table API connectors, writes it
 * to the local file system (parquet files with a rolling policy).
 * @author lilulu
 * @date 2023-04-10 17:40
 */
public class SqlConnectorFileSystemSinkDemo {
    public static void main(String[] args) {
        /*
         * 1. Build the table execution environment.
         * 2. Define the input table, consuming data from Kafka.
         * 3. Define the output table, writing data to the file system as parquet.
         * 4. Run an INSERT ... SELECT to pipe the source into the sink.
         */
        TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build());

        // Checkpointing MUST be enabled: with bulk formats such as parquet the
        // filesystem sink only commits (finalizes) in-progress part files when a
        // checkpoint completes. Without this, no finished files ever appear.
        tableEnv.getConfig().getConfiguration().setString("execution.checkpointing.interval", "30 s");

        // 2. Define the input table, consuming CSV records from a Kafka topic.
        tableEnv.executeSql(
                "CREATE TABLE tbl_log_kafka (\n" +
                        "  `user_id` STRING,\n" +
                        "  `item_id` INTEGER,\n" +
                        "  `behavior` STRING,\n" +
                        "  `ts` STRING\n" +
                        ") WITH (\n" +
                        "  'connector' = 'kafka',\n" +
                        "  'topic' = 'log-topic',\n" +
                        "  'properties.bootstrap.servers' = 'node1.itcast.cn:9092,node2.itcast.cn:9092,node3.itcast.cn:9092',\n" +
                        "  'properties.group.id' = 'gid-1',\n" +
                        "  'scan.startup.mode' = 'latest-offset',\n" +
                        "  'format' = 'csv'\n" +
                        ")"
        );

        // 3. Define the output table, writing data to the file system as parquet.
        /*
         * Rolling-policy options (note: for bulk formats like parquet, part files
         * additionally roll on every checkpoint regardless of these settings):
         *   sink.rolling-policy.file-size         128MB  MemorySize  Max size of a part file; exceeding it starts a new file.
         *   sink.rolling-policy.rollover-interval 30 m   Duration    Max time a part file may stay open before a new one is started.
         *   sink.rolling-policy.check-interval    1 m    Duration    How often the policy above is checked to decide whether to roll.
         */
        tableEnv.executeSql(
                "CREATE TABLE tbl_log_fs_sink (\n" +
                        "  `user_id` STRING,\n" +
                        "  `item_id` INTEGER,\n" +
                        "  `behavior` STRING,\n" +
                        "  `ts` STRING\n" +
                        ") WITH (\n" +
                        "  'connector' = 'filesystem',\n" +
                        "  'path' = 'datas/track-logs',\n" +
                        "  'format' = 'parquet',\n" +
                        "  'sink.rolling-policy.file-size' = '3MB',\n" +
                        "  'sink.rolling-policy.rollover-interval' = '1 min',\n" +
                        "  'sink.rolling-policy.check-interval' = '1 min'\n" +
                        ")"
        );

        // 4. Pipe the Kafka source into the filesystem sink (submits a streaming job).
        tableEnv.executeSql("insert into tbl_log_fs_sink select * from tbl_log_kafka");
    }
}
