package com.tdy.cdc.app;

import com.tdy.cdc.util.ConfigUtil;
import com.tdy.cdc.util.MyKafkaDeserialization;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.connector.kafka.source.reader.deserializer.KafkaRecordDeserializationSchema;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import scala.reflect.io.Streamable;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.List;

/**
 * Base class for Flink CDC streaming applications. Bootstraps the execution
 * environment (RocksDB state backend, exactly-once checkpointing, restart
 * strategy), delegates pipeline construction to subclasses via {@link #invoke},
 * and provides a shared Kafka source factory.
 *
 * @author NanHuang
 * @since 2023/1/24
 */
public abstract class BaseApp {

    /** Kafka consumer group id; also reused as the job name and checkpoint path suffix. */
    private String commonGroupId;

    /**
     * Bootstraps the Flink streaming environment, configures the state backend,
     * checkpointing and restart behavior, invokes the subclass pipeline, and
     * executes the job.
     *
     * @param port    REST port for the local Flink web dashboard
     * @param groupId Kafka consumer group id; also used as the job name and as
     *                the checkpoint storage path suffix
     * @param params  optional settings; if exactly one value is supplied it is
     *                applied as the job-wide parallelism
     * @throws RuntimeException wrapping any exception thrown by job execution
     */
    public void init(Integer port, String groupId, Integer... params) {
        System.setProperty("HADOOP_USER_NAME", "root");
        this.commonGroupId = groupId;
        // 1. Create the execution environment with the dashboard port configured.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port); // web dashboard port
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        if (params.length == 1) {
            env.setParallelism(params[0]); // job-wide parallelism
        }
        // 2. State backend and checkpointing.
        env.setStateBackend(new EmbeddedRocksDBStateBackend()); // enable RocksDB state backend
        env.enableCheckpointing(2 * 60 * 1000); // checkpoint every 2 minutes
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1); // no overlapping checkpoints
        env.getCheckpointConfig().setCheckpointTimeout(10 * 60 * 1000); // checkpoint timeout: 10 minutes
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.getCheckpointConfig().setCheckpointStorage(
                ConfigUtil.getProperty("checkpoint.storage.url.prefix.test") + commonGroupId); // checkpoint storage path
        // Failure restart strategy: up to 3 attempts, 30 seconds apart.
        // NOTE(review): this overload takes the delay in MILLISECONDS; the
        // previous literal `30` meant a 30 ms delay, which retries far too
        // aggressively — almost certainly 30 seconds was intended.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 30 * 1000L));
        // Operator chaining is left enabled; disable only when debugging:
        // env.disableOperatorChaining();
        // 3. Build the job graph (implemented by the subclass).
        invoke(env);
        // 4. Run the job, using the group id as the job name.
        try {
            env.execute(commonGroupId);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Subclasses implement the actual stream-processing pipeline here.
     *
     * @param env the pre-configured execution environment
     */
    public abstract void invoke(StreamExecutionEnvironment env);

    /**
     * Creates a Kafka source stream for the given topics, reading from the
     * earliest offsets with {@link MyKafkaDeserialization} and no watermarks.
     *
     * <p>Must be called after {@link #init} has set the group id (i.e. from
     * within {@link #invoke}), otherwise the consumer group id will be null.
     *
     * @param env    the execution environment passed to {@link #invoke}
     * @param topics Kafka topics to subscribe to
     * @return a stream of deserialized Kafka records
     */
    public DataStreamSource<String> readKafka(StreamExecutionEnvironment env, List<String> topics) {
        KafkaSource<String> source = KafkaSource.<String>builder()
                .setBootstrapServers(ConfigUtil.getProperty("kafka.bootstrap.servers"))
                .setTopics(topics)
                .setGroupId(commonGroupId)
                .setStartingOffsets(OffsetsInitializer.earliest())
                .setDeserializer(KafkaRecordDeserializationSchema.of(new MyKafkaDeserialization()))
                .build();
        return env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source");
    }
}
