package org.atguigu.gmall.realtime.app;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.atguigu.gmall.realtime.common.Constant;
import org.atguigu.gmall.realtime.util.FlinkSourceUtil;

/**
 * Project: gmallRealTime
 * Package: org.atguigu.gmall.realtime.app
 * Author: ZengHaiFeng
 * CreateTime: 2023/4/19 16:31:31
 * Description: Template base class for Flink real-time applications: builds the
 * environment, wires the Kafka source, and delegates processing to subclasses.
 * Version: 1.0
 */
public abstract class BaseApp {

    /**
     * Implements the stream-processing logic of the concrete application.
     *
     * @param env the fully configured execution environment
     * @param ds  the Kafka-backed source stream of raw record strings
     */
    public abstract void handle(StreamExecutionEnvironment env, DataStreamSource<String> ds);

    /**
     * Initializes the Flink environment (web UI, parallelism, state backend,
     * checkpointing), wires a Kafka source, delegates to {@link #handle}, and
     * submits the job.
     *
     * @param port        REST/web UI port for the local execution environment
     * @param parallelism default operator parallelism
     * @param topic       Kafka topic to consume from
     * @param name        used as the Kafka consumer-group id, the HDFS checkpoint
     *                    directory name, the source operator name, and the job name
     * @throws RuntimeException if job submission or execution fails; the original
     *                          exception is preserved as the cause
     **/
    public void init(int port, int parallelism, String topic, String name) {
        // HDFS user under which checkpoint data is written
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // expose the Flink web UI on the requested port
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        StreamExecutionEnvironment env
                = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        env.setParallelism(parallelism);

        // keep working state on the JVM heap; snapshots are persisted to HDFS below
        env.setStateBackend(new HashMapStateBackend());

        // checkpoint every 3 seconds
        env.enableCheckpointing(3 * 1000);

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // exactly-once checkpointing semantics
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // persist checkpoints to HDFS, one directory per application name
        checkpointConfig.setCheckpointStorage("hdfs://hadoop162:8020/" + name);

        // abort a checkpoint that takes longer than 10 seconds
        checkpointConfig.setCheckpointTimeout(10 * 1000);

        // leave at least 500 ms between the END of one checkpoint and the
        // start of the next (this is a pause, not the checkpoint interval)
        checkpointConfig.setMinPauseBetweenCheckpoints(500);

        // retain externalized checkpoint data on job cancellation so the job
        // can later be restarted from it
        checkpointConfig.setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Kafka source: `name` doubles as the consumer-group id
        KafkaSource<String> kafkaSource = FlinkSourceUtil.getKafkaSource(
                Constant.KAFKA_SERVERS,
                topic,
                name);
        DataStreamSource<String> ds =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), name);

        // application-specific processing supplied by the subclass
        handle(env, ds);

        try {
            // fix: submit with an explicit job name so the job is identifiable
            // in the Flink web UI (original called the no-arg execute())
            env.execute(name);
        } catch (Exception e) {
            // fix: include context instead of a bare wrap; cause is preserved
            throw new RuntimeException("Failed to execute Flink job '" + name + "'", e);
        }
    }
}
