package sql;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.sql.Timestamp;
import java.util.UUID;
import java.util.concurrent.ExecutionException;

/**

 Over聚合根据时间语义可以划分为7种聚合方式：
 处理时间：
 1. 按处理时间 计算最近N行（包括当前行）的聚合结果
    agg_func() OVER(PARTITION BY name ORDER BY proctime ROWS BETWEEN N-1 PRECEDING AND CURRENT ROW) as agg_val
 2. 按处理时间 计算最近N min(包括当前行)的聚合结果
    agg_func() OVER(PARTITION BY name ORDER BY proctime RANGE BETWEEN INTERVAL 'N' MINUTE PRECEDING AND CURRENT ROW) as agg_val
 3. 按处理时间 无界限聚合
    agg_func() OVER(PARTITION BY name ORDER BY proctime ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as agg_val

 事件时间：
 4. 按事件时间 计算最近N min(包括当前行)的聚合结果
    agg_func() OVER(PARTITION BY name ORDER BY rowtime RANGE BETWEEN INTERVAL 'N' MINUTE PRECEDING AND CURRENT ROW) as agg_val
 5. 按事件时间 无界限聚合
   agg_func() OVER(PARTITION BY name ORDER BY rowtime RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as agg_val
 6. 按事件时间 计算最近N行（包括当前行）的聚合结果
   agg_func() OVER(PARTITION BY name ORDER BY rowtime ROWS BETWEEN N-1 PRECEDING AND CURRENT ROW) as agg_val
 7. 按事件时间 按行数聚合 没有上下界
   agg_func() OVER(PARTITION BY name ORDER BY rowtime ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) as agg_val


 案例2：计算每个商品最近1min的总销售额
 2. 按处理时间 计算最近N min(包括当前行)的聚合结果
 agg_func() OVER(PARTITION BY name ORDER BY proctime RANGE BETWEEN INTERVAL '1' MINUTE PRECEDING AND CURRENT ROW) as
 agg_val

 */
public class D13_OverAgg2 {

    /**
     * Example job: per-product rolling 1-minute sum of sales using a
     * processing-time OVER aggregation
     * ({@code RANGE BETWEEN INTERVAL '1' MINUTE PRECEDING AND CURRENT ROW}).
     *
     * <p>Reads JSON records from Kafka topic "test" and prints every result
     * row via the 'print' connector.
     *
     * @param args unused
     * @throws InterruptedException if interrupted while waiting for the job
     * @throws ExecutionException   if the streaming INSERT job fails
     */
    public static void main(String[] args) throws InterruptedException, ExecutionException {

        // Pin the Flink REST/UI port so several local examples can coexist.
        Configuration flinkConf = new Configuration();
        flinkConf.setString("rest.port","9093");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(flinkConf);
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Checkpoint every 10 seconds into RocksDB with exactly-once semantics.
        env.enableCheckpointing(10 * 1000);
        env.setStateBackend(new EmbeddedRocksDBStateBackend());
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        checkpointConfig.setCheckpointStorage("file:///flink_ckp");

        // Kafka source: rowtime is the event-time attribute derived from the
        // stime string (10s watermark), proctime is a processing-time attribute.
        // A random group id gives each run a fresh consumer group, so
        // 'latest-offset' always takes effect.
        String genSql = "CREATE TABLE ods_tb ( " +
            " stime STRING," +
            " name STRING," +
            " val BIGINT," +
            " rowtime AS to_timestamp(stime)," +
            " proctime as proctime()," +
            " WATERMARK FOR rowtime AS rowtime - interval '10' second" +
            ") WITH ( " +
            "  'connector' = 'kafka'," +
            "  'topic' = 'test'," +
            "  'properties.bootstrap.servers' = 'kafka:9092'," +
            "  'properties.group.id' = '" + UUID.randomUUID() + "'," +
            "  'scan.startup.mode' = 'latest-offset'," +
            "  'format' = 'json'" +
            ")";

        // Print sink: (processing time, product name, rolling sum).
        String sinkPrint = "CREATE TABLE print (" +
            "    stime timestamp," +
            "    name STRING, " +
            "    sum_val BIGINT" +
            ") WITH (" +
            "     'connector' = 'print'" +
            ")";

        // Per-name rolling sum of val over the last minute of processing time.
        String sql = "INSERT INTO print" +
            " SELECT " +
            " proctime, " +
            " name," +
            " SUM(val) OVER(PARTITION BY name ORDER BY proctime RANGE BETWEEN INTERVAL '1' MINUTE PRECEDING AND " +
            "CURRENT ROW) " +
            " as " +
            " sum_val" +
            " FROM ods_tb";

        // DDL statements execute synchronously; the INSERT is submitted
        // asynchronously and returns a TableResult handle.
        tableEnv.executeSql(genSql);
        tableEnv.executeSql(sinkPrint);
        var insertResult = tableEnv.executeSql(sql);

        System.out.println(new Timestamp(System.currentTimeMillis()));

        // Without await() the submitted job's failures would go unnoticed and
        // main would simply fall through. Block until the (unbounded) job
        // terminates so any error propagates to the caller.
        insertResult.await();
    }
}
