package com.taimanetworks.kafka;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.Properties;

public abstract class Kafka2TableApp {

    /**
     * Configures the Flink streaming and Table environments, attaches a Kafka
     * source, runs the subclass-provided business logic, then starts the job.
     *
     * @param servers           Kafka bootstrap servers, e.g. {@code "host1:9092,host2:9092"}
     * @param topic             Kafka topic to consume
     * @param groupId           Kafka consumer group id
     * @param checkpointStorage URI of the checkpoint storage location (e.g. an HDFS path)
     * @param checkpointName    job name passed to {@code env.execute}
     * @param hadoopUser        Hadoop proxy user; falls back to {@code "root"} when null/empty
     * @throws Exception if environment setup fails
     */
    public void initAndStart(String servers,
                             String topic,
                             String groupId,
                             String checkpointStorage,
                             String checkpointName,
                             String hadoopUser) throws Exception {
        System.out.println("init...");

        // Set the Hadoop proxy user; must be in place before any HDFS/checkpoint access.
        if (hadoopUser == null || hadoopUser.isEmpty()) {
            hadoopUser = "root";
        }
        System.setProperty("HADOOP_USER_NAME", hadoopUser);

        // Create the stream execution environment. Parallelism precedence:
        // operator level > environment level (setParallelism) > client level (-p) > cluster config.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//        env.setParallelism(1);

        // State backend: RocksDB, persisting checkpoints to the given storage URI.
        // The cast selects the non-deprecated setStateBackend(StateBackend) overload.
        env.setStateBackend((StateBackend) new RocksDBStateBackend(checkpointStorage));

        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        // 2. Checkpoint configuration.
        // NOTE(review): 300 ms is a very aggressive checkpoint interval — confirm it is intended
        // and not meant to be 300 seconds (300000L).
        env.enableCheckpointing(300);

        // Advanced checkpoint options.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
/*        env.getCheckpointConfig().setCheckpointTimeout(60000L);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(100L);
        env.getCheckpointConfig().setPreferCheckpointForRecovery(true);
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(0);*/

        // 3. Restart strategy: fixed delay — up to 3 attempts, 10 s between attempts.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 10000L));
        // (Failure-rate restart would be configured here instead if needed.)

        // Create the Table environment in streaming mode.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
//                .useBlinkPlanner()  // required when integrating with Hive
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // Attach the Kafka data source.
        System.out.println("address:  " + servers);
        System.out.println("topic:  " + topic);
        DataStreamSource<String> kafkaSource = env.addSource(getKafkaSource(servers, topic, groupId));
        kafkaSource.print("kafka.................");

        // Run the subclass-provided business logic.
        System.out.println("running...");
        try {
            handler(tableEnv, kafkaSource, env);
        } catch (Exception e) {
            // NOTE(review): the exception is logged but execution still falls through to
            // env.execute() below — confirm starting the job after a failed handler() is intended.
            System.out.println("flink消费kafka出错:" + e.getMessage());
            e.printStackTrace();
        }

        try {
            System.out.println("execute...");
            env.execute(checkpointName);
        } catch (Exception e) {
            System.out.println("执行错误:" + e.getMessage());
            e.printStackTrace();
        }
    }

    /**
     * Builds a Kafka consumer for the given topic that always starts from the
     * latest offset. Kafka auto-commit is disabled: with checkpointing enabled,
     * offsets are committed through Flink checkpoints instead.
     *
     * @param servers Kafka bootstrap servers
     * @param topic   topic to subscribe to
     * @param groupId consumer group id
     * @return a configured {@link FlinkKafkaConsumer} producing String records
     */
    private static FlinkKafkaConsumer<String> getKafkaSource(String servers, String topic, String groupId) {
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", servers);
        properties.setProperty("group.id", groupId);
        properties.setProperty("auto.offset.reset", "latest");
//        properties.setProperty("auto.offset.reset", "earliest");
        // setCommitOffsetsOnCheckpoints(boolean) would store offsets inside the checkpoint.
        //"partition.assignment.strategy" -> "org.apache.kafka.clients.consumer.RangeAssignor",
//        properties.setProperty("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RangeAssignor");
        // Offsets are managed by Flink checkpoints, not by the Kafka client itself.
        properties.setProperty("enable.auto.commit", "false");
        // properties.setProperty("isolation.level", "read_committed");
        final FlinkKafkaConsumer<String> consumer = new FlinkKafkaConsumer<>(
                topic,
                new SimpleStringSchema(),
                properties
        );
//        consumer.setStartFromGroupOffsets();
        // Always consume from the latest record, ignoring any committed group offsets.
        consumer.setStartFromLatest();
        return consumer;
    }

    /**
     * Business-logic hook implemented by subclasses; invoked after the environments
     * and the Kafka source are set up, and before {@code env.execute} is called.
     *
     * @param tableEnv     the Table API environment
     * @param sourceStream the raw Kafka String stream
     * @param env          the stream execution environment
     * @throws Exception any error raised by the concrete implementation
     */
    public abstract void handler(StreamTableEnvironment tableEnv, DataStreamSource<String> sourceStream, StreamExecutionEnvironment env) throws Exception;
}
