package com.atguigu.bigdata.chapter11.window;

import com.atguigu.bigdata.bean.WaterSensor;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.table.api.Expressions.$;

/**
 * @Author lzc
 * @Date 2022/9/9 14:09
 */
/**
 * Demonstrates OVER windows in Flink SQL: per-row running aggregates computed
 * over a partition ordered by event time.
 *
 * Key points shown here:
 * <ul>
 *   <li>An OVER window can be declared inline ({@code sum(vc) over(...)}) or
 *       once via a named {@code WINDOW} clause and referenced by name.</li>
 *   <li>All OVER windows in a single SELECT must use the identical definition;
 *       a named window makes that constraint explicit and avoids repetition.</li>
 *   <li>The frame is either row-based ({@code ROWS BETWEEN ...}) or time-based
 *       ({@code RANGE BETWEEN ...}); when omitted, Flink defaults to
 *       {@code RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW}.</li>
 * </ul>
 */
public class Flink06_Over_SQL {
    public static void main(String[] args) {
        // Pin the local web UI port so repeated runs are reachable at the same address.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", 2000);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1); // single subtask keeps the printed output deterministic
        
        DataStream<WaterSensor> stream = env
            .fromElements(
                new WaterSensor("sensor_1", 1000L, 10),
                new WaterSensor("sensor_2", 2000L, 20),
                new WaterSensor("sensor_1", 3001L, 30),
                new WaterSensor("sensor_1", 3001L, 40),
                new WaterSensor("sensor_2", 6000L, 50),
                new WaterSensor("sensor_1", 8000L, 80)
            )
            // Timestamps are non-decreasing, so a monotonous-watermark strategy suffices.
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<WaterSensor>forMonotonousTimestamps()
                    .withTimestampAssigner((ws, ts) -> ws.getTs())
            );
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        
        // Expose the stream as a table; $("et").rowtime() declares the event-time attribute
        // that the OVER windows below order by.
        Table table = tEnv.fromDataStream(stream, $("id"), $("ts"), $("vc"), $("et").rowtime());
        tEnv.createTemporaryView("sensor", table);
        
        /* Inline OVER-window variants (kept for reference; all are valid frames):
        tEnv
            .sqlQuery("select " +
                          " *, " +
                          // row-based frame, all rows so far:
                          //   " sum(vc) over(partition by id order by et rows between unbounded preceding and current row)  as vc_sum " +
                          // row-based frame, previous row + current row:
                          //   " sum(vc) over(partition by id order by et rows between 1 preceding and current row)  as vc_sum " +
                          // time-based frame, all rows so far:
                          //   " sum(vc) over(partition by id order by et range between unbounded preceding and current row)  as vc_sum " +
                          // time-based frame, last 2 seconds:
                          //   " sum(vc) over(partition by id order by et range between interval '2' second preceding and current row)  as vc_sum " +
                          // no explicit frame: defaults to the time-based
                          // "range between unbounded preceding and current row" frame
                          " sum(vc) over(partition by id order by et)  as vc_sum " +
                          "from sensor")
            .execute()
            .print();*/
        
        
        // All OVER windows in one SELECT must share the same definition, so
        // declare the window once with a named WINDOW clause and reuse it.
        tEnv
            .sqlQuery("select " +
                          " *, " +
                          " sum(vc) over w  as vc_sum, " +
                          // FIX: alias was "max_sum", which misnames a max() aggregate;
                          // "vc_max" is consistent with "vc_sum".
                          " max(vc) over w  as vc_max " +
                          "from sensor " +
                          "window w as(partition by id order by et rows between unbounded preceding and current row)")
            .execute()
            .print();
        
    }
}
/*
Group windows: the classic GROUP BY windowing syntax.
TVF (table-valued function) windows: the modern replacement for group windows;
    they additionally support grouping sets (GROUPING SETS).
------
OVER windows
    Major practical use case: top-N queries.


select
    *,
    sum(vc) over( partition by id order by et rows between unbounded preceding and current row )

from sensor

Two orthogonal ways to define an OVER window frame:
    by row count (ROWS BETWEEN ...) or by time range (RANGE BETWEEN ...)

*/
