package com.atguigu.flink.sql.window;

import com.atguigu.flink.function.WaterSensorMapFunction;
import com.atguigu.flink.pojo.WaterSensor;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Over;
import org.apache.flink.table.api.OverWindow;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.table.api.Expressions.*;

/**
 * Created by Smexy on 2023/4/14
 */
/**
 * Demonstrates OVER-window aggregations on a water-sensor stream, first via
 * Flink SQL (with a shared named window) and then as equivalent Table API
 * {@link OverWindow} definitions (built but not executed — shown for reference).
 *
 * <p>Input: "id,ts,vc" text lines from a socket, mapped to {@link WaterSensor}.
 */
public class Demo7_OverAggSQL
{
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(env);

        env.setParallelism(1);

        // Read text lines from the socket and map them into WaterSensor POJOs.
        SingleOutputStreamOperator<WaterSensor> ds = env
            .socketTextStream("hadoop102", 8888)
            .map(new WaterSensorMapFunction());

        // Schema: the three physical columns, plus a processing-time column (pt)
        // and an event-time column (et) derived from ts, with a 1 ms watermark lag.
        Schema schema = Schema.newBuilder()
                              .column("id", "STRING")
                              .column("ts", "BIGINT")
                              .column("vc", "INT")
                              .columnByExpression("pt", "proctime()")
                              .columnByExpression("et", "TO_TIMESTAMP_LTZ(ts,3)")
                              .watermark("et","et - INTERVAL '0.001' SECOND")
                              .build();
        // Table API style: only the Table object is needed, no table name required.
        Table table = tableEnvironment.fromDataStream(ds,schema);

        tableEnvironment.createTemporaryView("t1",table);

        /*
            All aggregates must be computed on the same window.
                When a query contains several OVER aggregates, they must all
                share an identical window definition — hence the named
                window "w" reused by sum/min/max below.
         */
        String sql1 = " select id,ts,vc,et ," +
            //"             sum(vc) over( partition by id order by pt rows between UNBOUNDED preceding and  CURRENT row )" +
            //"             sum(vc) over( partition by id order by pt rows between 2 preceding and  CURRENT row )" +
            //"             sum(vc) over( partition by id order by et range between UNBOUNDED preceding and  CURRENT row )" +
            "             sum(vc) over w sumVc ," +
            // Fixed copy-paste bug: the min aggregate was also aliased "sumVc",
            // duplicating the sum column's name; it must be "minVc".
            "             min(vc) over w minVc ," +
            "             max(vc) over w maxVc " +
            "           from t1 " +
            "           window w as ( partition by id order by et range between INTERVAL '2' SECOND preceding and  CURRENT row )  ";

        tableEnvironment.sqlQuery(sql1).execute().print();

        // ---- Table API equivalents of the OVER windows above (not executed) ----

        // Row-count based (ROWS) windows, ordered by processing time:
        // frame: from the start of the partition up to the current row
        OverWindow w2 = Over.partitionBy($("id")).orderBy($("pt")).preceding(UNBOUNDED_ROW).following(CURRENT_ROW).as("w");
        // frame: the 2 rows before the current row, up to the current row
        // (2L: uppercase long suffix — lowercase 'l' is easily misread as '1')
        OverWindow w3 = Over.partitionBy($("id")).orderBy($("pt")).preceding(rowInterval(2L)).following(CURRENT_ROW).as("w");

        // Time-range based (RANGE) windows, ordered by event time.
        // NOTE(review): original comment warned that a frame whose bound goes
        // past the current time range is invalid — the examples below stay
        // within CURRENT_RANGE.
        // frame: from the start of the partition up to the current time range
        OverWindow w5 = Over.partitionBy($("id")).orderBy($("et")).preceding(UNBOUNDED_RANGE).following(CURRENT_RANGE).as("w");
        // frame: from 2 seconds before, up to the current time range
        OverWindow w6 = Over.partitionBy($("id")).orderBy($("et")).preceding(lit(2).seconds()).following(CURRENT_RANGE).as("w");

        // NOTE(review): print() above blocks on the unbounded socket stream, so
        // this call is effectively unreachable; kept to mirror the demo's flow.
        env.execute();

    }
}
