package com.atguigu.edu.app;

import com.atguigu.edu.util.FlinkSourceUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Template base class for Flink streaming jobs: builds the execution
 * environment, configures checkpointing, wires up a Kafka source, delegates
 * the business logic to {@link #handle}, and finally submits the job.
 *
 * <p>Subclasses implement {@link #handle} and call {@link #init} to run.
 */
public abstract class BaseApp {

    /**
     * Bootstraps and executes the streaming job.
     *
     * @param port          port for the local Flink REST/web UI
     * @param p             default parallelism — typically matched to the
     *                      Kafka topic's partition count
     * @param ckAndGroupId  used both as the Kafka consumer group id, the
     *                      checkpoint HDFS subdirectory, and the job name
     * @param topic         Kafka topic to consume
     * @throws RuntimeException if job execution fails (wraps the original cause)
     */
    public void init(int port,
                     int p,
                     String ckAndGroupId,
                     String topic){

        // Run HDFS operations (checkpoint storage) as the "atguigu" user when
        // launched from the IDE.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // Create the stream environment with an explicit REST port so the
        // local web UI is reachable at a predictable address.
        Configuration configuration = new Configuration();
        configuration.setInteger("rest.port",port);
        StreamExecutionEnvironment ev = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        ev.setParallelism(p); // parallelism — usually kept equal to the Kafka partition count
        ev.setStateBackend(new HashMapStateBackend()); // keep state on the JVM heap

        // Checkpointing: every 3s, exactly-once, at most one concurrent
        // checkpoint, with at least 500ms pause between two checkpoints.
        ev.enableCheckpointing(3000);
        ev.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        ev.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        ev.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        ev.getCheckpointConfig().setCheckpointTimeout(60*1000); // fail a checkpoint that takes over a minute
        // Keep externalized checkpoints on job cancellation so the job can be
        // restored manually later.
        ev.getCheckpointConfig().setExternalizedCheckpointCleanup((CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION));
        ev.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/edu/"+ckAndGroupId);

        // Kafka source; watermarks are left to the concrete job's handle().
        DataStreamSource<String> stream = ev.fromSource(FlinkSourceUtil.getKafkaSource(ckAndGroupId, topic),
                WatermarkStrategy.noWatermarks(), "kafka_source");

        // Business logic supplied by the subclass.
        handle(ev,stream);

        // Submit the job. Propagate failures instead of swallowing them:
        // printStackTrace() would let the caller believe the job succeeded.
        try {
            ev.execute(ckAndGroupId);
        } catch (Exception e) {
            throw new RuntimeException("Flink job '" + ckAndGroupId + "' failed to execute", e);
        }
    }

    /**
     * Implemented by concrete jobs: transforms the raw Kafka {@code stream}
     * (one String record per Kafka message) and attaches sinks.
     *
     * @param ev     the configured execution environment
     * @param stream the raw Kafka source stream
     */
    public abstract void handle(StreamExecutionEnvironment ev, DataStreamSource<String> stream);

}