package com.ruyuan.event.streaming.process;
import com.ruyuan.event.streaming.pojo.EventJoinLog;
import com.ruyuan.event.streaming.schema.EventJoinLogSchema;
import com.ruyuan.event.streaming.utils.Constants;
import com.ruyuan.event.streaming.utils.FlinkKafkaUtils;
import com.twitter.chill.protobuf.ProtobufSerializer;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.api.windowing.triggers.ProcessingTimeTrigger;
import org.apache.flink.streaming.api.windowing.triggers.PurgingTrigger;

/**
 * Retry-processing entry point: consumes the retry topic and re-drives the
 * event-join flow through a windowed retry function.
 */
public class StreamingJobRetryEntryPoint {
    /**
     * Kafka topics involved in the retry flow.
     */
    private static final String KAFKA_EVENT_RETRY_LOG = Constants.KAFKA_EVENT_RETRY_LOG;
    private static final String KAFKA_EVENT_JOIN_LOG = Constants.KAFKA_EVENT_JOIN_LOG;
    private static final String KAFKA_EVENT_JOIN_REPORT = Constants.KAFKA_EVENT_JOIN_REPORT;

    public static void main(String[] args) throws Exception {
        String groupId = "StreamingJobRetryEntryPoint";
        StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 60s so source offsets / operator state can be restored on failure.
        environment.enableCheckpointing(60000);
        environment.setParallelism(3);
        // Whether a task should fail when a checkpoint errors out. Default is true;
        // with false the task declines the checkpoint and keeps running.
        environment.getCheckpointConfig().setFailOnCheckpointingErrors(false);
        // NOTE(review): EventTime is declared here, but the window below fires on a
        // ProcessingTimeTrigger — confirm which time semantics are actually intended.
        environment.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        environment.getConfig().enableForceAvro();
        environment.getConfig().registerTypeWithKryoSerializer(EventJoinLog.class, ProtobufSerializer.class);

        // Consume the retry topic.
        DataStreamSource<EventJoinLog> eventJoinLogDataStreamSource = environment.addSource(
                FlinkKafkaUtils.getKafkaEventJoinRetrySource(KAFKA_EVENT_RETRY_LOG, new EventJoinLogSchema(), groupId));

        /*
         * Depends on whether the upstream can produce duplicates; if it can, the data
         * must be keyed (keyBy) before processing here.
         * Retry policy:
         *   count: at most 5 attempts; once the retry count reaches 5 the record is
         *          written downstream instead of being dropped.
         *   time:  retry once every 5 seconds.
         */
        eventJoinLogDataStreamSource.keyBy(EventJoinLog::getRequestId)
                // BUG FIX: was Time.seconds(5000) (~83 minutes); the retry interval
                // documented above is 5 seconds.
                .timeWindow(Time.seconds(5))
                .trigger(PurgingTrigger.of(ProcessingTimeTrigger.create()))
                .apply(new EventRetryJoinWindowFunction());

        // BUG FIX: the job was never submitted — without execute() the pipeline
        // definition above is a no-op and nothing runs on the cluster.
        environment.execute("StreamingJobRetryEntryPoint");
    }

}
