package table;

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;


import java.util.Properties;

/**
 * Abstract base class for Flink streaming jobs that read from a Kafka topic
 * and process the records through the Table API. Subclasses supply the
 * business logic by implementing {@link #handler(StreamTableEnvironment, DataStreamSource)}.
 */
public abstract class Kafka2TableApp {

    /**
     * Builds the execution environment (state backend, exactly-once checkpointing,
     * externalized checkpoints), wires up the Kafka source, delegates record
     * processing to {@link #handler}, and starts the job.
     *
     * @param servers           Kafka bootstrap servers ({@code bootstrap.servers})
     * @param topic             Kafka topic to consume
     * @param groupId           Kafka consumer group id
     * @param checkpointStorage base URI for checkpoint storage (e.g. an HDFS path prefix)
     * @param checkpointName    suffix for the checkpoint path; also used as the job name
     * @param hadoopUser        value for {@code HADOOP_USER_NAME}; falls back to "root"
     *                          when null or empty
     */
    void initAndStart(String servers,
                      String topic,
                      String groupId,
                      String checkpointStorage,
                      String checkpointName,
                      String hadoopUser) {
        System.out.println("init..." );

        // Set the Hadoop proxy user; default to "root" when none is provided.
        if (hadoopUser == null || hadoopUser.isEmpty()) {
            hadoopUser = "root";
        }
        System.setProperty("HADOOP_USER_NAME", hadoopUser);
        // Create the stream execution environment. Parallelism precedence:
        // operator level (e.g. on reduceBy) > environment level (setParallelism)
        // > client level (submit with -p) > cluster configuration level.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Configure checkpointing and state storage.
        env.setStateBackend(new HashMapStateBackend());
        // Checkpoint every 60000 ms with exactly-once semantics.
        env.enableCheckpointing(60000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        // Configure where checkpoints are stored.
        env.getCheckpointConfig().setCheckpointStorage(checkpointStorage + "/flink/checkpoint/" + checkpointName);
        // Retain checkpoints so they are not deleted when the job is cancelled.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
        );

        // Create the Table environment.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()  // required for Hive integration
                .inStreamingMode()  // streaming mode
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // Attach the Kafka data source.
        DataStreamSource<String> kafkaSource = env.addSource(getKafkaSource(servers, topic, groupId));
        // Delegate the business logic to the subclass.
        System.out.println("running..." );
        handler(tableEnv, kafkaSource);
        try {
            System.out.println("execute..." );
            env.execute(checkpointName);
        } catch (Exception e) {
            System.out.println("执行错误:" + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Creates a Kafka consumer that reads the given topic as plain strings.
     * Auto-commit is disabled: offsets are committed via Flink checkpoints
     * ({@code setCommitOffsetsOnCheckpoints(boolean)} stores offsets in the
     * checkpoint), and consumption starts from the earliest offset when no
     * committed offset exists.
     *
     * @param servers Kafka bootstrap servers
     * @param topic   topic to subscribe to
     * @param groupId consumer group id
     * @return a configured {@link FlinkKafkaConsumer} emitting raw string records
     */
    private static FlinkKafkaConsumer<String> getKafkaSource(String servers, String topic, String groupId) {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", servers);
        properties.setProperty("group.id", groupId);
        properties.setProperty("auto.offset.reset", "earliest");
        // Offsets are committed through checkpoints, not Kafka auto-commit.
        properties.setProperty("enable.auto.commit", "false");
        // properties.setProperty("isolation.level", "read_committed");
        return new FlinkKafkaConsumer<>(
                topic,
                new SimpleStringSchema(),
                properties
        );
    }

    /**
     * Business-logic hook implemented by subclasses.
     *
     * @param tableEnv     the Table environment bound to the streaming job
     * @param sourceStream the raw string records read from Kafka
     */
    public abstract void handler(StreamTableEnvironment tableEnv, DataStreamSource<String> sourceStream);
}
