package com.pw.gmall.realtime.app.dws;

import com.pw.gmall.realtime.common.Constant;
import com.pw.gmall.realtime.entities.ProvinceStats;
import com.pw.gmall.realtime.utils.FlinkSinkUtils;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @Author: linux_future
 * @since: 2022/3/26
 **/
public class TestApp {

    /**
     * Ad-hoc test job: reads the DWM order-wide topic from Kafka, runs a
     * 5-second tumbling-window aggregation per province, and prints the
     * result of the windowing-TVF variant to stdout.
     */
    public static void main(String[] args) {
        // Run as the HDFS user the cluster expects.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 3033); // local Flink web UI port
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        // NOTE(review): the original comment claimed checkpointing was enabled
        // here, but it never was. Add env.enableCheckpointing(...) if this job
        // needs exactly-once recovery semantics.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // 1. Declare a dynamic table backed by the Kafka topic, with an
        //    event-time attribute derived from create_time and a 3s watermark.
        tEnv.executeSql("create table order_wide(" +
                " province_id bigint, " +
                " province_name string, " +
                " province_area_code string, " +
                " province_iso_code string, " +
                " province_3166_2_code string, " +
                " split_total_amount decimal(20, 2), " +
                " order_id bigint, " +
                " create_time string, " +
                " et as TO_TIMESTAMP(create_time), " +
                " watermark for et as et - interval '3' second " +
                ")with(" +
                "   'connector' = 'kafka', " +
                "   'properties.bootstrap.servers' = 'hadoop162:9092,hadoop163:9092,hadoop164:9092', " +
                "   'properties.group.id' = 'ProvinceStatsApp', " +
                "   'topic' = '" + Constant.TOPIC_DWM_ORDER_WIDE + "', " +
                "   'scan.startup.mode' = 'earliest-offset', " +
                "   'format' = 'json' " +
                ")");

        // 2. Legacy group-window (TUMBLE in GROUP BY) form of the aggregation.
        //    Kept only as a syntax reference: sqlQuery validates and plans the
        //    statement but nothing executes until the Table is sunk or
        //    executed, so the result is intentionally discarded.
        tEnv.sqlQuery("select" +
                " province_id," +
                " province_name," +
                " province_area_code area_code," +
                " province_iso_code iso_code, " +
                " province_3166_2_code iso_3166_2," +
                " date_format(tumble_start(et, interval '5' second), 'yyyy-MM-dd HH:mm:ss') stt, " +
                " date_format(tumble_end(et, interval '5' second), 'yyyy-MM-dd HH:mm:ss') edt, " +
                " sum(split_total_amount) order_amount, " +
                " count(distinct(order_id)) order_count, " +
                " unix_timestamp()*1000 ts " +
                "from order_wide " +
                "group by province_id, province_name, province_area_code, province_iso_code, province_3166_2_code, " +
                " tumble(et, interval '5' second )");

        // 3. Same aggregation expressed with the windowing TVF syntax.
        String sql = "select " +
                " province_id,province_name," +
                " window_start,window_end,window_time ,unix_timestamp()*1000 ts" +
                " from table(tumble(table order_wide,descriptor(et),interval '5' second))" +
                " group by province_id,province_name,window_start,window_end,window_time";

        // execute().print() submits the Table job and blocks, streaming rows
        // to stdout. Do NOT call env.execute() afterwards: no DataStream
        // operators were defined, so it would only throw
        // "No operators defined in streaming topology".
        tEnv.sqlQuery(sql).execute().print();
    }

}
