package t20240724;

import org.apache.flink.configuration.*;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;
import org.apache.flink.contrib.streaming.state.RocksDBConfigurableOptions;

import java.io.IOException;
import java.sql.Timestamp;
import java.time.Duration;


/**
 * Experiment: a {@code ROW_NUMBER() ... WHERE rownum = 1} deduplication query is
 * translated by the planner into a {@code Deduplicate(keep=[FirstRow])} operator.
 * The point of interest is how the deduplication state gets cleaned up (idle state
 * retention is configured to 30 seconds below).
 *
 * <p>NOTE(review): the original comment described "ROW_NUMBER() desc rowtime", but the
 * SQL in {@link #main} orders by {@code proctime} ascending — confirm which variant was
 * actually intended for this experiment.
 */
public class D6_Dump {

    /**
     * Observed checkpoint stats from a previous run (job 421, COMPLETED):
     * 20/20  2024-07-25 18:14:35  2024-07-25 18:14:35  85ms  28.8 MB  0 B (0 B)
     */

    public static void main(String[] args) throws InterruptedException, IOException {

        // RocksDB option values observed in the TM logs of a previous run:
        //   rocksdb.compaction.level.max-size-level-base   : 4096
        //   rocksdb.compaction.level.target-file-size-base : 2048
        //   rocksdb.writebuffer.size                       : 2048

        // Cluster-creation options: these must be in place before the (local)
        // environment is built, so they go into the Configuration passed to
        // getExecutionEnvironment(...).
        Configuration flinkConf = new Configuration();
        // Typed option instead of the stringly-typed setString("rest.port", "9092").
        // 9092 is intentional here (the Web UI port for this local experiment).
        flinkConf.set(RestOptions.PORT, 9092);
        // 1 GiB of managed memory, mostly consumed by the RocksDB state backend.
        flinkConf.set(TaskManagerOptions.MANAGED_MEMORY_SIZE, MemorySize.ofMebiBytes(1024));

        // Job-level options, applied through env.configure(...) below.
        Configuration config = new Configuration();
        config.set(StateBackendOptions.STATE_BACKEND, "rocksdb");
        config.set(CheckpointingOptions.CHECKPOINT_STORAGE, "filesystem");
        config.set(CheckpointingOptions.CHECKPOINTS_DIRECTORY, "file:///flink_ckp_rocks");
        // DEBUG log level so the effective RocksDB option values appear in the TM logs.
        // (fix: this was redundantly set to the same value on both flinkConf and config;
        // consolidated to the job-level configuration.)
        config.set(RocksDBConfigurableOptions.LOG_LEVEL, org.rocksdb.InfoLogLevel.DEBUG_LEVEL);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(flinkConf);
        env.configure(config, Thread.currentThread().getContextClassLoader());
        env.enableCheckpointing(1000); // checkpoint every second

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setMaxConcurrentCheckpoints(1);

        env.setParallelism(10);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Deduplicate state for a key that has been idle for 30s is eligible for cleanup.
        TableConfig tableConfig = tableEnv.getConfig();
        tableConfig.setIdleStateRetention(Duration.ofSeconds(30));

        // Source: datagen with large (10k-char) names to make state growth visible.
        String genSql = "CREATE TABLE ods_tb ( " +
                " stime TIMESTAMP(3)," +
                " name STRING," +
                " id BIGINT," +
                " ts AS localtimestamp," +
                " rowtime AS localtimestamp," +
                " proctime as proctime()," +
                " WATERMARK FOR rowtime AS rowtime - interval '30' second" +
                ") WITH ( " +
                " 'connector'='datagen', " +
                " 'rows-per-second'='100'," +
                " 'fields.name.length'='10000'," +
                " 'fields.id.kind'='random'" +
                ")";

        // Sink: blackhole — we only care about operator state, not the output.
        String sink = "CREATE TABLE sink1 (" +"    " +
                " stime TIMESTAMP(3)," +
                "    name STRING, " +
                "    id BIGINT " +
                ") WITH (" +
                "    'connector' = 'blackhole'" +
                ")";

        // ROW_NUMBER() ... WHERE rownum = 1 over proctime: planner produces
        // Deduplicate(keep=[FirstRow]) keyed on (stime, name).
        String sql1 = "insert into sink1 " +
                " SELECT stime,name,id FROM " +
                " (SELECT *,ROW_NUMBER() OVER(PARTITION BY stime,name ORDER BY proctime) as rownum FROM ods_tb) " +
                " WHERE rownum = 1";

        tableEnv.executeSql(genSql);
        tableEnv.executeSql(sink);

        tableEnv.executeSql(sql1);

        // Marks roughly when the job was submitted (executeSql is async for INSERT).
        System.out.println(new Timestamp(System.currentTimeMillis()));
    }
}
