package com.sugon.ohdfs.integration.flink.job.table;

import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.*;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.springframework.beans.factory.annotation.Value;

import javax.annotation.PostConstruct;

/**
 * One-shot Flink Table API job, started eagerly via {@code @PostConstruct}: reads a
 * JSON "filesystem" table at {@code basePath} and writes the rows straight back into
 * the same descriptor (a self round-trip, presumably a sink smoke test — confirm).
 *
 * <p>NOTE(review): this class carries {@code @Value} fields but no {@code @Component}/
 * {@code @Service} annotation — it is presumably registered as a bean elsewhere; verify.
 */
public class SchemaBuildJob {

    /** Checkpoint interval in milliseconds (FileSink part files finalize only on checkpoints). */
    @Value("${flink.checkpoint.interval:60000}")
    long checkPointInterval;
    /** HDFS directory used as checkpoint storage. */
    @Value("${hdfs.checkpoint.path:hdfs://10.11.8.29:9001/flink/sinkTest/checkpoint}")
    String checkPointFilePath;
    // NOTE(review): injected but never read in this class — confirm whether it is
    // still needed (savepoint paths are normally passed at submission time, not in code).
    @Value("${hdfs.savepoint.path:hdfs://10.11.8.29:9001/flink/sinkTest/savepoint}")
    String savePointFilePath;
    /** HDFS directory backing the filesystem table (both source and sink of this job). */
    @Value("${hdfs.base-path:hdfs://10.11.8.29:9001/flink/sinkTest/table/test}")
    String basePath;


    @PostConstruct
    public void init() throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        //IMPORTANT: Checkpointing needs to be enabled when using the FileSink in STREAMING mode. Part files can only
        // be finalized on successful checkpoints. If checkpointing is disabled, part files will forever stay in the
        // in-progress or the pending state, and cannot be safely read by downstream systems.
        env.enableCheckpointing(checkPointInterval, CheckpointingMode.EXACTLY_ONCE);
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage(checkPointFilePath);

        // BUG FIX: the original code created this bridged environment, then ignored it and
        // ran the job on a second, standalone TableEnvironment built from EnvironmentSettings.
        // That standalone environment has no link to `env`, so the checkpointing configured
        // above was silently discarded — exactly the failure mode the IMPORTANT comment warns
        // about. All table operations now go through the single bridged environment.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        final TableDescriptor sourceDescriptor = TableDescriptor
                .forConnector("filesystem")
                .schema(Schema.newBuilder()
                        .column("name", DataTypes.STRING())
                        .column("value", DataTypes.STRING())
                        .column("processTimestamp", DataTypes.BIGINT())
                        .column("produceTimestamp", DataTypes.BIGINT())
                        .build())
                .option("path", basePath)
                .option("format", "json")
                // Tolerate malformed/partial JSON records instead of failing the job.
                .option("json.fail-on-missing-field", "false")
                .option("json.ignore-parse-errors", "true")
                .build();

        // BUG FIX: createTable's first argument is a catalog object path ("catalog.db.table"),
        // not a filesystem location — passing the hdfs:// URI in `basePath` is not a valid
        // identifier. Register under a plain table name instead; the HDFS location is already
        // carried by the descriptor's "path" option.
        tableEnv.createTable("SinkTestTable", sourceDescriptor);

        Table test = tableEnv.from(sourceDescriptor);
        // Submit the INSERT pipeline (read from the descriptor, write back into it).
        TableResult tableResult = test.executeInsert(sourceDescriptor);
        // NOTE(review): the returned iterator is discarded; collect() here only opens a
        // result handle. If the intent is to block until job completion, use await() instead.
        tableResult.collect();
    }

}
