package com.yl.flink.processor;

import cn.hutool.core.date.DateUtil;
import com.yl.constant.Const;
import com.yl.entity.MultiDataEntity;
import com.yl.entity.Packet;
import com.yl.entity.cdc.BatchIntegrity;
import com.yl.util.SUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.state.MapState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

/**
 * Judges per-batch packet completeness.
 *
 * <p>Keyed by an upstream-chosen key; tracks, per {@code <day, batchId>}, how many packets
 * of a batch have been received and emits a {@link BatchIntegrity} record downstream for
 * every incoming packet so the sink can create/update the batch row in the database.
 *
 * @author wlf
 * @since 2022/8/18
 */
@Slf4j
public class IntegrityJudgeFunc extends KeyedProcessFunction<String, MultiDataEntity, BatchIntegrity> {

    // Per-batch running state: <theDay_batchId, BatchIntegrity with receive count>.
    private transient MapState<String, BatchIntegrity> receivePacketSumState;

    /**
     * Initializes the keyed map state with a 24-hour TTL so stale batch entries
     * (batches that never complete) are evicted automatically.
     */
    @Override
    public void open(Configuration parameters) throws Exception {
        MapStateDescriptor<String, BatchIntegrity> mapStateDescriptor = new MapStateDescriptor<>(
                Const.FLINK_BATCH_INTEGRITY, BasicTypeInfo.STRING_TYPE_INFO, TypeInformation.of(new TypeHint<>() {
        }));
        // TTL refreshed on create and on every write; expired entries are never returned.
        StateTtlConfig ttlConfig = StateTtlConfig.newBuilder(Time.hours(24))
                .setUpdateType(StateTtlConfig.UpdateType.OnCreateAndWrite)
                .setStateVisibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
                .build();
        mapStateDescriptor.enableTimeToLive(ttlConfig);
        receivePacketSumState = getRuntimeContext().getMapState(mapStateDescriptor);
    }

    /**
     * Judges batch integrity:
     * <ol>
     *   <li>each data packet arrives as one stream element carrying packet metadata;</li>
     *   <li>the first packet's start time becomes the batch start time, the last packet's
     *       end time becomes the batch end time;</li>
     *   <li>on packet id 1 the downstream sink creates the batch record;</li>
     *   <li>every further packet of the batch updates the received-packet count;</li>
     *   <li>on packet id == packet total the batch end time is recorded;</li>
     *   <li>when the received count reaches the designed total, the batch is marked
     *       complete and its cached state is dropped.</li>
     * </ol>
     *
     * @param multiDataEntity stream element carrying target and packet info
     * @param ctx             process-function context
     * @param out             collector; one BatchIntegrity is emitted per packet
     */
    @Override
    public void processElement(MultiDataEntity multiDataEntity, KeyedProcessFunction<String, MultiDataEntity, BatchIntegrity>.Context ctx, Collector<BatchIntegrity> out) throws Exception {
        Integer targetId = multiDataEntity.getTarget().getId();
        Packet packet = multiDataEntity.getPacket();
        Integer batchId = packet.getBatchId();
        int packetId = packet.getPacketId();
        int packetSum = packet.getPacketSum();
        long packetStartTime = packet.getPacketStartTime();
        long packetEndTime = packet.getPacketEndTime();
        // State key: day of the packet's start time + batch id.
        String theDay = DateUtil.date(packetStartTime).toString(Const.FMT_TRIM_DAY);
        String key = SUtil.fmt(theDay, batchId);
        BatchIntegrity batchIntegrity;
        if (receivePacketSumState.contains(key)) {
            // Known batch: bump the received-packet count.
            // NOTE(review): duplicate packet ids also increment the count — confirm upstream
            // guarantees exactly-once delivery per packet id.
            batchIntegrity = receivePacketSumState.get(key);
            batchIntegrity.setPacket_sum_receive(batchIntegrity.getPacket_sum_receive() + 1);
        } else {
            // First packet seen for this batch: initialize its state entry.
            batchIntegrity = BatchIntegrity.builder()
                    .target_id(targetId)
                    .cj_day(theDay)
                    .batch_id(batchId)
                    .packet_sum_design(packetSum)
                    .packet_sum_receive(1)
                    .integrity(0)
                    .create_time(DateUtil.date().toString())
                    .build();
        }
        // Independent checks (not if/else-if): a single-packet batch has
        // packetId == 1 == packetSum and must get BOTH its start and end time.
        if (packetId == 1) {
            // First packet of the batch defines the batch start time.
            batchIntegrity.setStart_time(DateUtil.date(packetStartTime).toString());
        }
        if (packetId == packetSum) {
            // Last packet of the batch defines the batch end time.
            batchIntegrity.setEnd_time(DateUtil.date(packetEndTime).toString());
        }
        if (batchIntegrity.getPacket_sum_receive() == packetSum) {
            // All packets received: mark complete and drop the cached entry.
            batchIntegrity.setIntegrity(1);
            receivePacketSumState.remove(key);
        } else {
            // Persist AFTER all mutations: with a serializing state backend (e.g. RocksDB)
            // a put() before the setters would silently lose the start/end-time updates.
            receivePacketSumState.put(key, batchIntegrity);
        }
        // Emit downstream so the sink creates/updates the database record.
        log.info("batchIntegrity:{}", batchIntegrity);
        out.collect(batchIntegrity);
    }

}
