package com.zyx.flinkdemo.sql.time;

import com.zyx.flinkdemo.sql.cons.CommonConfig;
import com.zyx.flinkdemo.sql.utils.ConnectUtils;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demonstrates declaring the event-time attribute through a DDL statement.
 *
 * @author zyx
 * @since 2021/5/29 15:57
 */
public class EventTimeByDdl {
    public static void main(String[] args) {
        // Bootstrap the streaming environment with a single parallel task,
        // then wrap it in a table environment for SQL access.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Column list for the DDL that declares the event-time attribute.
        // `ts` is an epoch value in milliseconds (e.g. 1618989564564), so it is
        // derived into a TIMESTAMP_LTZ(3) column via TO_TIMESTAMP_LTZ, as the
        // Flink docs recommend for epoch-based sources.
        // Legacy equivalent: "time_ltz as to_timestamp(from_unixtime(ts/1000,'yyyy-MM-dd HH:mm:ss')),"
        // If the source instead carried zone-less strings such as
        // "2020-04-15 20:13:40.564", a plain TIMESTAMP(3) column would be the
        // recommended mapping:
        //   "user_action_time TIMESTAMP(3),"
        //   "WATERMARK FOR user_action_time AS user_action_time - INTERVAL '5' SECOND"
        String schemaColumns = String.join("\n",
                "id STRING,",
                "ts BIGINT,",
                "vc INT,",
                "time_ltz AS TO_TIMESTAMP_LTZ(ts, 3),",
                // Watermark trails the event time by 5 seconds.
                "WATERMARK FOR time_ltz AS time_ltz - INTERVAL '5' SECOND");

        // Register the Kafka-backed source table using the generated connector DDL.
        tableEnv.executeSql(ConnectUtils.getNormalKafkaSourceConnect("sensor_source", schemaColumns,
                "sensor_source", CommonConfig.KAFKA_SERVER,
                "src_consumer", "latest-offset", "csv"));

        // Look up the registered table and print its resolved schema.
        Table table = tableEnv.from("sensor_source");
        table.printSchema();
    }
}
