package com.bw.ads;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/*
DDL reference for the raw source table. NOTE(review): this documents topic
'mn5tm4topic1' on broker 'hadoop-single:9092' with org/route columns, while the
code below creates a table over topic 'mn5tm4topic2' on 'hadoop102:9092' with a
different (city-level) schema — presumably topic1 is the upstream raw stream
that gets joined into the wide topic2; confirm which broker host is correct.

create table mn5TM4Wide (
    start_org_id bigint,
    end_org_id bigint,
    start_org_name string,
    end_org_name string,
    start_province_name string,
    end_province_name string,
    create_time string
)with(
    'connector' = 'kafka',
    'topic' = 'mn5tm4topic1',
    'properties.bootstrap.servers' = 'hadoop-single:9092',
    'properties.group.id' = 'group1',
    'scan.startup.mode' = 'earliest-offset',
    'format' = 'json'
)
 */
/**
 * Flink streaming job: per calendar day, report each receiver city's two peak
 * receiving hours (Top-2 hour buckets by parcel count within a 1-day tumbling
 * event-time window), reading the wide parcel stream from Kafka topic
 * {@code mn5tm4topic2} and printing results to stdout.
 *
 * <p>Because the source is unbounded, the {@code execute().print()} call blocks
 * indefinitely; nothing placed after it in {@code main} is reachable.
 */
public class Mn5TM4_4 {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Source table over Kafka. `times` is a computed event-time column parsed
        // from the JSON `create_time` string; the 0-second watermark tolerates no
        // out-of-order records — TODO confirm the producer emits in event-time order.
        tEnv.executeSql("create table mn5TM4Wide (\n" +
                "    receiver_city_id bigint,\n" +
                "    receiver_city_name string,\n" +
                "    create_time string,\n" +
                "    times as to_timestamp(create_time),\n" +
                "    WATERMARK FOR times AS times - INTERVAL '0' SECOND\n" +
                ")with(\n" +
                "    'connector' = 'kafka',\n" +
                "    'topic' = 'mn5tm4topic2',\n" +
                "    'properties.bootstrap.servers' = 'hadoop102:9092',\n" +
                "    'properties.group.id' = 'group1',\n" +
                "    'scan.startup.mode' = 'earliest-offset',\n" +
                "    'format' = 'json'\n" +
                ")");

        // Hour bucket: the first 13 chars of create_time, i.e. "yyyy-MM-dd HH"
        // (assumes create_time is formatted "yyyy-MM-dd HH:mm:ss" — verify upstream).
        Table table = tEnv.sqlQuery("select *,substring(create_time,1,13) h from mn5TM4Wide");
        tEnv.createTemporaryView("tmp1",table);

//        tEnv.sqlQuery("select * from tmp1").execute().print();

        // Requirement: per day, find each city's peak receiving hours
        // (take the top 2 hour buckets within every 24-hour window).
        // Step 1: count parcels per (1-day tumbling window, hour bucket, city).
        Table table1 = tEnv.sqlQuery("" +
                "select " +
                " TUMBLE_START(times,INTERVAL '1' day) as wsStart," +
                " TUMBLE_END(times,INTERVAL '1' day) as wsEnd," +
                " h,receiver_city_name,count(1) rcnts from tmp1 " +
                "GROUP BY TUMBLE(times,INTERVAL '1' day),h,receiver_city_name");
        tEnv.createTemporaryView("tmp2",table1);

        // Step 2: Flink Top-N pattern — rank hour buckets WITHIN each (window, city)
        // partition by count desc and keep rk<=2. Note: this yields each city's own
        // two peak hours; it does NOT rank cities against one another.
        tEnv.sqlQuery("" +
                " select * from " +
                "(select *,row_number() over(partition by wsStart,wsEnd,receiver_city_name order by rcnts desc) rk" +
                " from tmp2) where rk<=2").execute().print();

        // Unreachable: print() above never returns on an unbounded stream (and would
        // otherwise launch a second job). Kept commented out for ad-hoc debugging,
        // matching the tmp1 debug print above.
//        table1.execute().print();

    }

}














