package cn.doitedu.sql;

import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demo: declaring event-time (watermark) and processing-time attributes in Flink SQL DDL.
 *
 * <p>Registers a Kafka-backed table containing physical columns, computed (expression)
 * columns, a processing-time attribute via {@code proctime()}, and an event-time
 * attribute via a {@code WATERMARK FOR} declaration, then prints the resulting schema.
 */
public class Demo03_WaterMarkTimeAttribute {

    public static void main(String[] args) {

        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 5s with exactly-once semantics (demo-scale settings).
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");
        env.getCheckpointConfig().setCheckpointTimeout(5000);
        env.setParallelism(1);

        // Keep operator state on the JVM heap.
        env.setStateBackend(new HashMapStateBackend());

        // Create the table environment on top of the streaming environment.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // DDL for a Kafka source table. Note:
        //  - uid / event_id / properties / action_time are physical columns.
        //  - eid / pt / xt / rt are computed (expression) columns; proctime() yields a
        //    processing-time attribute.
        //  - The WATERMARK clause must target a TIMESTAMP/TIMESTAMP_LTZ(3) column; only
        //    the column it targets (rt) becomes the *ROWTIME* attribute — xt, built from
        //    the same expression, stays a plain TIMESTAMP_LTZ(3).
        String sourceDdl =
                "create table user_events_kafka(\n"
                        + "     uid bigint,       \n"
                        + "     event_id string,  \n"
                        + "     properties map<string,string>,\n"
                        + "     action_time bigint,           \n"
                        + "     eid as upper(event_id),"
                        + "     pt as proctime(),"
                        + "     xt as to_timestamp_ltz(action_time,3),"
                        + "     rt as to_timestamp_ltz(action_time,3),"
                        + "     watermark for rt as  rt - interval '0' second    "
                        + ") with (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'tpc-a',\n"
                        + "  'properties.bootstrap.servers' = 'doitedu:9092',\n"
                        + "  'properties.group.id' = 'doit47-g2',\n"
                        + "  'scan.startup.mode' = 'latest-offset',\n"
                        + "  'value.format' = 'json',\n"
                        + "  'value.fields-include' = 'EXCEPT_KEY'\n"
                        + ")";
        tableEnv.executeSql(sourceDdl);

        //tableEnv.executeSql("select * from user_events_kafka").print();

        // Print the schema to show which columns carry time attributes.
        tableEnv.executeSql("desc user_events_kafka").print();
        /**
         * +-------------+-----------------------------+-------+-----+---------------------------------------+----------------------------+
         * |        name |                        type |  null | key |                                extras |                  watermark |
         * +-------------+-----------------------------+-------+-----+---------------------------------------+----------------------------+
         * |         uid |                      BIGINT |  TRUE |     |                                       |                            |
         * |    event_id |                      STRING |  TRUE |     |                                       |                            |
         * |  properties |         MAP<STRING, STRING> |  TRUE |     |                                       |                            |
         * | action_time |                      BIGINT |  TRUE |     |                                       |                            |
         * |         eid |                      STRING |  TRUE |     |                  AS UPPER(`event_id`) |                            |
         * |          pt | TIMESTAMP_LTZ(3) *PROCTIME* | FALSE |     |                         AS PROCTIME() |                            |
         * |          xt |            TIMESTAMP_LTZ(3) |  TRUE |     | AS TO_TIMESTAMP_LTZ(`action_time`, 3) |                            |
         * |          rt |  TIMESTAMP_LTZ(3) *ROWTIME* |  TRUE |     | AS TO_TIMESTAMP_LTZ(`action_time`, 3) | `rt` - INTERVAL '0' SECOND |
         * +-------------+-----------------------------+-------+-----+---------------------------------------+----------------------------+
         */
    }
}
