package com.wsjj.gmall.base;

import com.wsjj.gmall.util.FlinkSourceUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.common.internals.Topic;


// Template Method pattern: base class shared by all Flink streaming applications.
// Subclasses implement handle() with the business logic; start() owns the boilerplate
// (environment setup, Kafka source wiring, job submission).
public abstract class BaseApp {

    /**
     * Business logic implemented by each concrete application.
     *
     * @param env    the Flink stream execution environment
     * @param stream the raw string stream read from the Kafka topic
     */
    public abstract void handle(StreamExecutionEnvironment env, DataStreamSource<String> stream);

    /**
     * Builds the execution environment, attaches the Kafka source, delegates to
     * {@link #handle(StreamExecutionEnvironment, DataStreamSource)}, then submits the job.
     *
     * @param port         REST port for the local Flink web UI (currently unused — see the
     *                     commented-out configuration below)
     * @param parallelism  default operator parallelism for the job
     * @param topic        Kafka topic to consume from
     * @param ckAndGroupId Kafka consumer-group id (also intended as the checkpoint path suffix)
     */
    public void start(int port, int parallelism, String topic, String ckAndGroupId) {
// TODO 1. Checkpoint-related configuration (disabled for local development).

//        System.setProperty("HADOOP_USER_NAME","atguigu");
//
//        Configuration configuration = new Configuration();
//        configuration.setInteger("rest.port",port);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(parallelism);

//        Set the state backend
//        env.setStateBackend(new HashMapStateBackend());

//        Enable checkpointing every 5 seconds
//        env.enableCheckpointing(5000);
//        Checkpoint mode: exactly-once
//        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
//        Checkpoint storage location
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8082/gmall/stream/");
//        Max concurrent checkpoints
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
//        Minimum pause between checkpoints
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000);
//        Checkpoint timeout
//        env.getCheckpointConfig().setCheckpointTimeout(10000);
//        Retain externalized checkpoints when the job is cancelled
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        KafkaSource<String> kafkaSource = FlinkSourceUtil.getKafkaSource(topic, ckAndGroupId);

        DataStreamSource<String> stream =
                env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source");

// TODO 2. Application-specific business logic.
        handle(env, stream);

//        TODO 3. Submit the streaming job (blocks until the job terminates).
        try {
            env.execute();
        } catch (Exception e) {
            // Wrap as unchecked but preserve the original cause for diagnostics.
            throw new RuntimeException(e);
        }
    }
}
