package cn._51doit.live.jobs;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Window Top-N demo following the official Flink example: hot-goods ranking
 * over hopping windows computed with Flink SQL on a Kafka CSV source.
 */
public class SQLHotGoodsTopN {

    public static void main(String[] args) {

        StreamExecutionEnvironment execEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(execEnv);

        // Source table over a CSV-encoded Kafka topic. The event-time column
        // etime is derived from the epoch-millis field ts and declared as the
        // watermark with zero allowed lateness.
        final String sourceDdl =
                "CREATE TABLE kafka_event (\n"
                        + "  `cid` String,\n"
                        + "  `pid` String,\n"
                        + "  `type` String,\n"
                        + "  `ts` BIGINT, \n"
                        + "  `etime` as TO_TIMESTAMP(FROM_UNIXTIME(ts / 1000)), \n"
                        + "   WATERMARK FOR etime as etime - INTERVAL '0' SECONDS \n"
                        + ") WITH (\n"
                        + "  'connector' = 'kafka',\n"
                        + "  'topic' = 'event-csv10',\n"
                        + "  'properties.bootstrap.servers' = 'node-1.51doit.cn:9092,node-2.51doit.cn:9092,node-3.51doit.cn:9092',\n"
                        + "  'scan.startup.mode' = 'earliest-offset',\n"
                        + "  'csv.ignore-parse-errors' = 'true', \n"
                        + "  'format' = 'csv'\n"
                        + ")";
        tableEnv.executeSql(sourceDdl);

        // Per-window counts computed with the HOP windowing TVF
        // (2-second slide, 10-second size), kept in a temporary view
        // that the Top-N query below reads from.
        final String countViewDdl =
                "create TEMPORARY view v_temp as SELECT cid, pid, type, window_start, window_end, count(*) counts \n"
                        + "  FROM TABLE(\n"
                        + "    HOP(TABLE kafka_event, DESCRIPTOR(etime), INTERVAL '2' SECONDS, INTERVAL '10' SECONDS))\n"
                        + "  GROUP BY cid, pid, type, window_start, window_end";
        tableEnv.executeSql(countViewDdl);

        // Window Top-N: keep the top 3 rows per (cid, type, window),
        // ranked by the aggregated counts in descending order.
        final String topNQuery =
                "select * from (select cid, pid, type, window_start, window_end, counts, row_number() over(partition by cid, type, window_start, window_end order by counts DESC) rn from v_temp) where rn <= 3";
        TableResult rankingResult = tableEnv.executeSql(topNQuery);

        // Streams the ranking rows to stdout; blocks while the job runs.
        rankingResult.print();
    }
}
