package cn.doitedu.sql;

import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink SQL demo: page-visit analytics over a Kafka event stream.
 *
 * <p>Pipeline sketch:
 * <ol>
 *   <li>Maps a Kafka topic ({@code tpc-a}) of user events to the table
 *       {@code user_events_kafka} (JSON values: uid, event_id, properties map, action_time).</li>
 *   <li>Creates a JDBC-backed sink/source table {@code page_count_kafka} over the MySQL
 *       table {@code doit47.page_pv}.</li>
 *   <li>Prints a one-shot snapshot of the JDBC table, then submits two streaming
 *       aggregation queries (UV per page; top-2 pages per user).</li>
 * </ol>
 *
 * <p>NOTE(review): the results of the last two SELECTs are never consumed
 * (no {@code .print()} / {@code .collect()}) — presumably leftovers from a teaching
 * session; confirm intent before relying on them.
 */
public class Demo01_PageCount {
    public static void main(String[] args) {

        // Build the stream execution environment (programming entry point).
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");
        // NOTE(review): timeout equals the checkpoint interval (5000 ms) — very tight;
        // a slow checkpoint will be aborted. Confirm this is intentional for the demo.
        env.getCheckpointConfig().setCheckpointTimeout(5000);
        env.setParallelism(1);

        env.setStateBackend(new HashMapStateBackend());

        // Create the table environment on top of the stream environment.
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);


        // DDL: map the Kafka source topic to a table.
        // Fully-qualified name: default_catalog.default_database.user_events_kafka
        tenv.executeSql(
                "create table user_events_kafka(\n" +
                "     uid bigint,\n" +
                "     event_id string,\n" +
                "     properties map<string,string>,\n" +
                "     action_time bigint\n" +
                ") with (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'tpc-a',\n" +
                "  'properties.bootstrap.servers' = 'doitedu:9092',\n" +
                "  'properties.group.id' = 'doit47-g2',\n" +
                "  'scan.startup.mode' = 'latest-offset',\n" +
                "  'value.format' = 'json',\n" +
                "  'value.fields-include' = 'EXCEPT_KEY'\n" +
                ")");





        //tenv.executeSql("select * from user_events_kafka").print();


        // DDL: map the target (sink) table.
        // The plain kafka connector, when used as a sink, does NOT support
        // update semantics (-U/+U/-D), so the aggregate below cannot write to it.
//        tenv.executeSql(
//                "create table page_count_kafka(\n" +
//                "    url string,\n" +
//                "    cnt bigint\n" +
//                ") with (\n" +
//                "  'connector' = 'kafka',\n" +
//                "  'topic' = 'tpc-b',\n" +
//                "  'properties.bootstrap.servers' = 'doitedu:9092',\n" +
//                "  'value.format' = 'json',\n" +
//                "  'value.fields-include' = 'EXCEPT_KEY'\n" +
//                ")");


        // The upsert-kafka connector DOES support update semantics (-U/+U/-D).
//        tenv.executeSql("CREATE TABLE page_count_kafka      ( \n" +
//                "  url STRING,                                \n" +
//                "  pv  BIGINT,                                \n" +
//                "  primary key(url) not enforced              \n" +
//                ") WITH (                                     \n" +
//                "  'connector' = 'upsert-kafka',              \n" +
//                "  'topic' = 'tpc-b',                         \n" +
//                "  'properties.bootstrap.servers' = 'doitedu:9092',\n" +
//                "  'key.format' = 'json',                  \n" +
//                "  'value.format' = 'json',                  \n" +
//                "  'value.fields-include' = 'EXCEPT_KEY'     \n" +
//                ")");

        // The jdbc connector can act as both source and sink; as a sink it
        // supports consuming update semantics (changelog streams).
        // NOTE(review): the table is still named "page_count_kafka" although it is
        // now JDBC/MySQL-backed — misleading name kept from the earlier experiments.
        tenv.executeSql("CREATE TABLE page_count_kafka      ( \n" +
                "  url STRING,                                \n" +
                "  pv  BIGINT,                                \n" +
                "  primary key(url) not enforced              \n" +
                ") WITH (                                     \n" +
                "  'connector' = 'jdbc',                     \n" +
                "  'url' = 'jdbc:mysql://doitedu:3306/doit47', \n" +
                "  'table-name' = 'page_pv',                 \n" +
                "  'username' = 'root',                      \n" +
                "  'password' = 'root'                       \n" +
                ")");

        // Test the jdbc connector as a source.
        // As a source, the jdbc connector issues a single "select *" and reads a
        // point-in-time snapshot of the physical table; subsequent changes to the
        // table are NOT picked up (it is a bounded, one-shot scan).
        // Since the scan is bounded, this print() blocks only until the snapshot is done.
        tenv.executeSql("select * from page_count_kafka").print();



        // Analytics SQL:
        // count the number of visits (PV) per page and upsert into the JDBC table.
//        tenv.executeSql(
//                "INSERT INTO page_count_kafka " +
//                "SELECT \n" +
//                "    properties['url'] as url,\n" +
//                "    count(1) as cnt\n" +
//                "from user_events_kafka\n" +
//                "where event_id = 'page_load'\n" +
//                "group by properties['url']");


        //tenv.executeSql("select * from page_count_kafka").print();







        // Count the number of distinct visitors (UV) per page.
        // NOTE(review): the returned TableResult is discarded — the query's output
        // is never printed or written anywhere; confirm whether a sink/print was intended.
        tenv.executeSql(
                "SELECT\n" +
                "properties['url'] as url,\n" +
                "count(distinct uid) as uv\n" +
                "from user_events_kafka\n" +
                "where event_id = 'page_load'\n" +
                "group by properties['url']");



        // For each user, find the top 2 most-visited pages and their visit counts
        // (per-user count aggregation + row_number() over a desc ordering, filtered to rn<=2).
        // NOTE(review): result is likewise discarded — no sink or print() attached.
        tenv.executeSql(
                "SELECT\n" +
                "    uid,\n" +
                "    url,\n" +
                "    cnt,\n" +
                "    rn\n" +
                "from (\n" +
                "    SELECT\n" +
                "        uid,\n" +
                "        url,\n" +
                "        cnt,\n" +
                "        row_number() over(partition by uid order by cnt desc ) as rn\n" +
                "    from (\n" +
                "    \n" +
                "        SELECT\n" +
                "        \n" +
                "        uid,\n" +
                "        properties['url'] as url,\n" +
                "        count(1) as cnt\n" +
                "        \n" +
                "        from user_events_kafka\n" +
                "        where event_id = 'page_load'\n" +
                "        group by uid,properties['url']\n" +
                "    ) o1\n" +
                ") o2\n" +
                "where rn<=2");




    }
}
