package cn.doitedu.sql;

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Author: 深似海
 * @Site: <a href="www.51doit.com">多易教育</a>
 * @QQ: 657270652
 * @Date: 2024/3/4
 * @Desc: 学大数据，上多易教育
 *   每个页面上访问次数最多的前2个用户
 **/
public class _09_GroupTopn {

    /**
     * Entry point: continuously computes, for every page URL, the top-2 users by
     * 'page_load' event count, and upserts the result into MySQL.
     *
     * <p>Pipeline: Kafka (JSON action log) -> per-(url, uid) count -> ROW_NUMBER()
     * Top-N per url -> JDBC upsert sink (primary key (url, rn)).
     */
    public static void main(String[] args) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Exactly-once checkpoints every 1s; checkpoint data kept on local disk.
        env.enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");

        env.setParallelism(2);

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);
        // Let watermarks advance even when some Kafka partitions receive no data.
        tenv.getConfig().set("table.exec.source.idle-timeout", "1s");

        createActionLogSourceTable(tenv);
        createMysqlSinkTable(tenv);
        submitGroupTopnJob(tenv);
    }

    /** Maps the Kafka topic 'action-log' (JSON user-behavior events) to table {@code action_log}. */
    private static void createActionLogSourceTable(StreamTableEnvironment tenv) {
        tenv.executeSql(
                " create table action_log(                              "+
                        "   uid int,                                            "+
                        " 	event_id string,                                    "+
                        " 	properties map<string,string>,                      "+
                        " 	action_time bigint,                                 "+
                        " 	pt as proctime(),                                   "+
                        " 	rt as to_timestamp_ltz(action_time,3),              "+
                        " 	watermark for rt as rt                              "+
                        "  ) with (                                             "+
                        "    'connector' = 'kafka',                             "+
                        "    'topic' = 'action-log',                            "+
                        "    'properties.bootstrap.servers' = 'doitedu:9092',   "+
                        "    'properties.group.id' = 'g003',                    "+
                        "    'scan.startup.mode' = 'latest-offset',             "+
                        "    'value.format' = 'json',                           "+
                        "    'value.fields-include' = 'EXCEPT_KEY'              "+
                        " )                                                     "
        );
    }

    /**
     * Maps the MySQL target table; primary key (url, rn) makes the JDBC sink
     * upsert, so the Top-N result stays current under retractions.
     */
    private static void createMysqlSinkTable(StreamTableEnvironment tenv) {
        tenv.executeSql(
                "CREATE TABLE topn_test1_mysql ( " +
                        "    url string,   " +
                        "    rn  bigint,   " +
                        "    uid int,    " +
                        "    cnt bigint,    " +
                        "    primary key (url,rn)   not  enforced  " +
                        ") with (             " +
                        "    'connector' = 'jdbc',      " +
                        "    'url' = 'jdbc:mysql://doitedu:3306/doit46', " +
                        "    'table-name' = 'topn_test1', " +
                        "    'username' = 'root',       " +
                        "    'password' = 'root'       " +
                        ")");
    }

    /**
     * Submits the continuous Top-N query: count 'page_load' events per (url, uid),
     * rank users within each url with ROW_NUMBER(), keep ranks 1-2, write to MySQL.
     * Flink recognizes the ROW_NUMBER()-over-PARTITION BY + rn&lt;=N filter as a
     * Top-N pattern and maintains it incrementally.
     */
    private static void submitGroupTopnJob(StreamTableEnvironment tenv) {
        tenv.executeSql(
                        " INSERT INTO topn_test1_mysql                                        "+
                        " WITH tmp AS (                                                       "+
                        "     SELECT                                                          "+
                        "         properties['url'] as url,                                   "+
                        "     	  uid,                                                         "+
                        "     	  count(event_id) filter(where event_id = 'page_load') as cnt  "+
                        "     FROM action_log                                                "+
                        "     GROUP BY                                                       "+
                        "         properties['url'],                                         "+
                        "     	  uid                                                        "+
                        " )                                                                  "+
                        " SELECT                                                               "+
                        "   url,                                                               "+
                        "   rn,                                                                "+
                        "   uid,                                                               "+
                        "   cnt                                                                "+
                        " FROM  (                                                              "+
                        "     SELECT                                                           "+
                        "         url,                                                         "+
                        "     	uid,                                                           "+
                        "     	cnt,                                                           "+
                        "     	row_number() over(partition by url order by cnt desc ) as rn   "+
                        "     FROM tmp                                                         "+
                        " ) tmp2                                                               "+
                        " WHERE rn<=2                                                          "
        );
    }

}
