package com.zhang.gmall.realtime.app.dws;

import com.zhang.gmall.realtime.beans.ProvinceStatsSQL;
import com.zhang.gmall.realtime.utils.ClickHouseUtil;
import com.zhang.gmall.realtime.utils.MyKafkaUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @title: 地区主题统计
 * @author: zhang
 * @date: 2022/3/15 17:37
 */
/**
 * Province-theme DWS aggregation job.
 *
 * <p>Reads three Kafka topics (order-wide facts, page log, unique-visit log),
 * windows them into 10-second tumbling windows keyed by area code / province,
 * and merges the pv/uv metric streams. The join against the HBase province
 * dimension and the write to ClickHouse are declared but not yet wired up
 * (see the TODO notes below).
 */
public class ProvinceStatsSqlAppPlus {
    public static void main(String[] args) throws Exception {
        //TODO 1. Stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        //TODO 2. Table execution environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        //TODO 3. Checkpointing — NOTE(review): not configured; without checkpoints the
        // Kafka source offsets are not committed under exactly-once semantics. Confirm
        // whether this is intentional for local testing before deploying.
        //TODO 4. Create dynamic tables over Kafka, declaring event time + watermark

        // Order-wide fact table: event time derived from the create_time string.
        String topic = "dwm_order_wide_2022";
        String groupID = "provinceStatsSqlApp";
        tableEnv.executeSql("create table order_wide (" +
                " order_id BIGINT," +
                " province_id BIGINT," +
                " province_name STRING," +
                " province_area_code STRING," +
                " province_iso_code STRING," +
                " province_3166_2_code STRING," +
                " split_total_amount DOUBLE," +
                " create_time STRING ," +
                " rt as TO_TIMESTAMP(create_time)," +
                " WATERMARK FOR rt AS rt - INTERVAL '5' SECOND" +
                ") WITH (" + MyKafkaUtil.getKafkaDDl(topic, groupID) +
                ")");

        //tableEnv.sqlQuery("select * from order_wide").execute().print();

        // Page log table: pv per area code over 10s tumbling windows.
        // uvCnt is emitted as a 0 placeholder so the schema lines up with uvTable
        // for the UNION ALL below.
        String pageTopic = "dwd_page_log_2022";
        tableEnv.executeSql(
                "create table page_log (" +
                        " common MAP<STRING,STRING>," +
                        " page MAP<STRING,STRING>," +
                        " ts BIGINT," +
                        " rt as TO_TIMESTAMP(FROM_UNIXTIME(ts/1000))," +
                        " WATERMARK FOR rt AS rt - INTERVAL '5' SECOND" +
                        ") WITH (" + MyKafkaUtil.getKafkaDDl(pageTopic, groupID) + ")"
        );
        Table pvTable = tableEnv.sqlQuery(
                "select " +
                        " DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' second),'yyyy-MM-dd HH:mm:ss') stt," +
                        " DATE_FORMAT(TUMBLE_end(rt, INTERVAL '10' second),'yyyy-MM-dd HH:mm:ss') edt, " +
                        " common['ar'] ar," +
                        " count(*) pvCnt," +
                        " 0 uvCnt," +
                        " UNIX_TIMESTAMP()*1000 ts " +
                        " from page_log " +
                        " GROUP BY TUMBLE(rt, INTERVAL '10' second),common['ar']"
        );

        // Unique-visit log table: uv per area code over the same window,
        // with pvCnt as the 0 placeholder.
        String uvTopic = "dwm_unique_visit_2022";
        tableEnv.executeSql(
                "create table uv_log (" +
                        " common MAP<STRING,STRING>," +
                        " page MAP<STRING,STRING>," +
                        " ts BIGINT," +
                        " rt as TO_TIMESTAMP(FROM_UNIXTIME(ts/1000))," +
                        " WATERMARK FOR rt AS rt - INTERVAL '5' SECOND" +
                        ") WITH (" + MyKafkaUtil.getKafkaDDl(uvTopic, groupID) + ")"
        );

        Table uvTable = tableEnv.sqlQuery(
                "select " +
                        " DATE_FORMAT(TUMBLE_START(rt, INTERVAL '10' second),'yyyy-MM-dd HH:mm:ss') stt," +
                        " DATE_FORMAT(TUMBLE_end(rt, INTERVAL '10' second),'yyyy-MM-dd HH:mm:ss') edt, " +
                        " common['ar'] ar," +
                        " 0 pvCnt," +
                        " count(*) uvCnt," +
                        " UNIX_TIMESTAMP()*1000 ts " +
                        " from uv_log " +
                        " GROUP BY TUMBLE(rt, INTERVAL '10' second),common['ar']"
        );

        // Merge the two metric streams. UNION ALL (not UNION): the branches can never
        // produce duplicate rows — each is grouped per (window, ar) and the pvCnt/uvCnt
        // placeholder columns differ — so plain UNION would only add a useless stateful
        // de-duplication operator to the plan.
        // Note: embedding a Table in the SQL string registers it under a generated name.
        Table pvUvTable = tableEnv.sqlQuery(
                "SELECT  * " +
                        "FROM (" +
                        "    (SELECT * FROM " + pvTable + ")" +
                        "  UNION ALL" +
                        "    (SELECT * FROM " + uvTable + ")" +
                        ")"
        );


        // Province dimension table backed by HBase (lookup source).
        tableEnv.executeSql("CREATE TABLE DIM_BASE_PROVINCE (" +
                " id STRING," +
                " INFO ROW<NAME STRING,REGION_ID STRING,AREA_CODE STRING,ISO_CODE STRING,ISO_3166_2 STRING>," +
                " PRIMARY KEY (id) NOT ENFORCED" +
                ") WITH (" +
                "'connector' = 'hbase-2.2'," +
                "'table-name' = 'GMALL_REALTIME_TEST:DIM_BASE_PROVINCE'," +
                "'zookeeper.quorum' = 'hadoop102:2181'" +
                ")");

        Table dimProvince = tableEnv.sqlQuery(
                "select " +
                        " id ," +
                        " NAME,REGION_ID,AREA_CODE,ISO_CODE,ISO_3166_2 " +
                        " from DIM_BASE_PROVINCE"
        );

        //TODO 5. Join pvUvTable / order_wide with dimProvince and sink to ClickHouse.
        // NOTE(review): pvUvTable and dimProvince are currently never consumed, and
        // ProvinceStatsSQL / ClickHouseUtil are imported but unused — the pipeline is
        // unfinished. As written, no operator is attached to the streaming environment,
        // so env.execute() below will fail with "No operators defined in streaming
        // topology" until a sink (e.g. executeInsert / toRetractStream) is added.

        //TODO 6. Submit the job
        env.execute();
    }
}
