package com.atguigu.flink.sql;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.table.api.Expressions.$;

/**
 * Created by Smexy on 2022/12/21
 *
 *  Multi-dimensional analysis:
 *          select
 *              xx
 *          from xxx
 *          group by a,b,c
 *          grouping sets( (a,b),(a),(b) )  -- available in Hive; supported by Flink window TVFs
 *          cube:   available in ClickHouse and Hive; supported by Flink window TVFs
 *          rollup: available in ClickHouse and Hive; supported by Flink window TVFs
 *          with totals: available in ClickHouse only
 */
public class Demo12_TVFGroupingSets
{
    public static void main(String[] args) {

        // Pin the Flink web UI / REST endpoint to a fixed port for this demo.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 3333);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.getConfig().setAutoWatermarkInterval(2000); // emit watermarks every 2 seconds
        env.setParallelism(1);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Monotonously increasing timestamps, so the stream carries event time
        // that the table layer can expose as a rowtime attribute.
        WatermarkStrategy<WaterSensor> wmStrategy =
            WatermarkStrategy.<WaterSensor>forMonotonousTimestamps()
                .withTimestampAssigner((sensor, recordTs) -> sensor.getTs());

        SingleOutputStreamOperator<WaterSensor> sensorStream = env
            .socketTextStream("hadoop103", 8888)
            .map(new WaterSensorMapFunction())
            .assignTimestampsAndWatermarks(wmStrategy);

        // Turn the stream into a table, adding a processing-time column (pt)
        // and an event-time (rowtime) column (et).
        Table sensorTable = tableEnv.fromDataStream(
            sensorStream,
            $("ts"), $("vc"), $("id"), $("pt").proctime(), $("et").rowtime());

        // Register the table under a name so SQL can reference it.
        tableEnv.createTemporaryView("ws", sensorTable);

        /*
         * Tumbling-window TVF aggregation.
         *
         * Flink's grouping-sets syntax differs from Hive/ClickHouse:
         *   - Hive:  group by <any columns>
         *   - Flink: the GROUP BY must always list the window_start and
         *            window_end columns produced by the windowing TVF;
         *            only the remaining keys go into rollup/cube/grouping sets.
         */
        String windowAggSql = "SELECT window_start, window_end, id , SUM(vc) sumVC" +
            "  FROM TABLE(" +
            "    TUMBLE(TABLE ws, DESCRIPTOR(et), INTERVAL '5' SECONDS))" +
            "  GROUP BY window_start, window_end,  " +
            "  rollup( (id) ) "
            //"  cube( (id) ) "
            // "  grouping sets( (id),()  ) "
            ;

        // executeSql submits the job; print() streams the results to stdout.
        tableEnv.executeSql(windowAggSql).print();
    }
}
