package org.example.lookup_join;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Batch-mode sanity check for Paimon tables: registers a local Paimon catalog
 * and prints up to 10 sample rows from {@code t_user_table} and
 * {@code t_user_table_with_eventtime} so their contents can be verified.
 */
public class FlinkBatchDataCheck {

    /**
     * Entry point: builds a batch execution environment, registers the Paimon
     * catalog, and dumps a sample of each table to stdout.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // Set up the batch execution environment. Both the stream runtime mode
        // and the table-environment settings must agree on batch mode.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);
        EnvironmentSettings settings = EnvironmentSettings.newInstance().inBatchMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // Register the Paimon catalog backed by a local warehouse directory.
        executeAndLog(tableEnv,
                "CREATE CATALOG paimon WITH (" +
                        "    'type' = 'paimon'," +
                        "    'warehouse' = 'file:///tmp/paimon'" +
                        ");");

        // Make the Paimon catalog the current catalog for subsequent queries.
        executeAndLog(tableEnv, "USE CATALOG paimon;");

        // Print a sample of each table under the Paimon default database.
        printTableSample(tableEnv, "t_user_table");
        printTableSample(tableEnv, "t_user_table_with_eventtime");
    }

    /** Logs the SQL statement to stdout and executes it on the given environment. */
    private static void executeAndLog(StreamTableEnvironment tableEnv, String sql) {
        System.out.println("执行SQL: " + sql);
        tableEnv.executeSql(sql);
    }

    /**
     * Prints up to 10 rows of the named table from the Paimon default database.
     *
     * @param tableEnv  table environment to run the query on
     * @param tableName unqualified table name (backtick-quoted into the query)
     */
    private static void printTableSample(StreamTableEnvironment tableEnv, String tableName) {
        System.out.println("检查" + tableName + "数据：");
        String sql = "SELECT * FROM `paimon`.`default`.`" + tableName + "` LIMIT 10;";
        System.out.println("执行SQL: " + sql);
        tableEnv.executeSql(sql).print();
    }
}