package cn.doitedu.demo.topn;

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Demo: continuous per-group Top-N with Flink SQL.
 *
 * <p>Reads JSON click events from a Kafka topic, counts page views per
 * (url, uid) pair, then keeps the two most active users for every page
 * via a {@code row_number() OVER (PARTITION BY ...)} Top-N query, and
 * prints the continuously updating result to stdout.
 */
public class 分组topn计算 {

    public static void main(String[] args) {

        // DataStream runtime environment with exactly-once checkpointing
        // every 5s; state snapshots go to a local filesystem directory.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");
        env.setParallelism(1);

        // Table/SQL environment layered on top of the stream environment.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // DDL mapping the Kafka topic 'ss-1' to source table t1; the JSON
        // payload is decoded from the record value only (key excluded).
        String sourceDdl =
                "CREATE TABLE t1                  (\n"
                + "  uid BIGINT,                               \n"
                + "  event_id STRING,                          \n"
                + "  properties MAP<STRING,STRING>,            \n"
                + "  action_time BIGINT                        \n"
                + ") WITH (                                    \n"
                + "  'connector' = 'kafka',                    \n"
                + "  'topic' = 'ss-1',                         \n"
                + "  'properties.bootstrap.servers' = 'doitedu:9092',\n"
                + "  'properties.group.id' = 'doit44_g1',      \n"
                + "  'scan.startup.mode' = 'latest-offset',    \n"
                + "  'value.format' = 'json',                  \n"
                + "  'value.fields-include' = 'EXCEPT_KEY'     \n"
                + ")";
        tableEnv.executeSql(sourceDdl);

        // Top-N query: innermost subquery (o1) aggregates page views per
        // (url, uid); the middle layer (o2) ranks users within each url by
        // pv descending; the outer filter keeps the top 2 per page.
        String topNQuery =
                "SELECT\n"
                + "    url,\n"
                + "    uid,\n"
                + "    pv,\n"
                + "    rn\n"
                + "FROM (\n"
                + "    SELECT\n"
                + "        url,\n"
                + "        uid,\n"
                + "        pv,\n"
                + "        row_number() over(partition by url order by pv desc) as rn \n"
                + "    FROM \n"
                + "    (\n"
                + "        SELECT\n"
                + "            properties['url'] as url,\n"
                + "            uid,\n"
                + "            count(1) as pv \n"
                + "        FROM t1 \n"
                + "        group by \n"
                + "            properties['url'],\n"
                + "            uid\n"
                + "    ) o1     \n"
                + ") o2    \n"
                + "WHERE rn<=2 ";

        // Continuously materialize and print the changelog of the result.
        tableEnv.executeSql(topNQuery).print();
    }

}
