import com.atguigu.gmall.util.KafkaUtil;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.time.Duration;

public class test01 {
    /**
     * Job entry point: builds a local Flink streaming/table environment, registers the
     * Kafka-backed "topic_db" source via {@code KafkaUtil.getKafkaDB}, and prints the
     * resulting rows to stdout.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        // TODO 1 set up the streaming execution environment
        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();

        // Single parallel task — convenient for local debugging / ordered output.
        streamEnv.setParallelism(1);

        StreamTableEnvironment streamTableEnv = StreamTableEnvironment.create(streamEnv);
        // Expire idle table state after 905 s (~15 min + 5 s) — presumably sized to
        // outlast an upstream window/TTL; confirm against the job's join semantics.
        streamTableEnv.getConfig().setIdleStateRetention(Duration.ofSeconds(905L));

        // TODO 2 checkpointing and state backend — intentionally disabled for now;
        // kept as a reference for the production configuration.
                /*
                env.enableCheckpointing(5 * 60 * 1000L, CheckpointingMode.EXACTLY_ONCE);
                env.getCheckpointConfig().setCheckpointTimeout(3 * 60 * 1000L);
                env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
                env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");
                System.setProperty("HADOOP_USER_NAME","atguigu");
                env.setStateBackend(new HashMapStateBackend());
                 */

        // Source topic and consumer group for the Kafka DDL helper.
        // NOTE(review): "pre_proces" looks like a typo for "pre_process", but the group id
        // is externally-visible Kafka state — confirm before renaming it.
        String sourceTopic = "topic_db";
        String consumerGroup = "dwd_trade_order_pre_proces";
        streamTableEnv.executeSql(KafkaUtil.getKafkaDB(sourceTopic, consumerGroup)).print();
    }
}
