package org.lzfto.flink.demo.business.flinkTest.source.mqtt;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import javax.annotation.Nullable;

import org.apache.flink.api.connector.source.SplitEnumerator;
import org.apache.flink.api.connector.source.SplitEnumeratorContext;
import org.apache.flink.runtime.operators.coordination.OperatorEvent;
import org.apache.flink.runtime.operators.coordination.OperatorEventHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Split enumerator for the MQTT source. Runs on the JobManager: it partitions the
 * initial set of {@link MqttSplit}s across subtasks, hands splits out when readers
 * request them, takes splits back on reader failure, and snapshots its state for
 * checkpointing. Also reacts to a {@code TerminateSignalEvent} operator event by
 * signaling "no more splits" to the subtasks that still have pending splits.
 *
 * <p>NOTE(review): Flink invokes enumerator callbacks from a single coordinator
 * thread, so the plain {@link HashMap}/{@link ArrayList} state is assumed safe —
 * confirm no other thread touches this instance.
 */
public class MqttEnumerator implements SplitEnumerator<MqttSplit, MqttEnumeratorCheckpoint>,OperatorEventHandler  {

	private static final Logger LOGGER = LoggerFactory.getLogger(MqttEnumerator.class);

    private final SplitEnumeratorContext<MqttSplit> context;
    // Splits queued per target subtask index, waiting for that reader to request them.
    private final Map<Integer, List<MqttSplit>> pendingSplits;
    // Every split this enumerator knows about; retained so snapshotState can persist them.
    private final List<MqttSplit> allSplits;

    public MqttEnumerator(SplitEnumeratorContext<MqttSplit> context, List<MqttSplit> initialSplits) {
        this.context = context;
        this.pendingSplits = new HashMap<>();
        // Defensive copy: callers may hand us an unmodifiable or shared list.
        this.allSplits = new ArrayList<>(initialSplits);
    }

    @Override
    public void start() {
        // Distribute all splits across subtasks up front, keyed by split-id hash.
        for (MqttSplit split : allSplits) {
            // floorMod: hashCode() may be negative, and a plain % would produce a
            // negative subtask index that no reader ever requests, silently
            // orphaning the split.
            int subtask = Math.floorMod(split.splitId().hashCode(), context.currentParallelism());
            pendingSplits.computeIfAbsent(subtask, id -> new ArrayList<>()).add(split);
        }
    }
    
    /**
     * Assigns every split pending for {@code subtaskId} to that reader, or signals
     * "no more splits" when nothing is pending.
     *
     * @param subtaskId          requesting reader's subtask index
     * @param requesterHostname  hostname of the requester; may be null and is unused
     */
    @Override
    public void handleSplitRequest(int subtaskId, @Nullable String requesterHostname) {
        List<MqttSplit> splits = pendingSplits.get(subtaskId);
        if (splits != null && !splits.isEmpty()) {
            Iterator<MqttSplit> iterator = splits.iterator();
            while (iterator.hasNext()) {
            	MqttSplit split=iterator.next();
                context.assignSplit(split, subtaskId);
                LOGGER.info("mqtt source 分配分片ID:{}.子任务编号:{}",split.splitId(),subtaskId);
                iterator.remove(); // remove as we go so a failure leaves only unassigned splits pending
            }
        } else {
            context.signalNoMoreSplits(subtaskId);
        }
    }


    /**
     * Called on reader failure: re-queue the splits so they are re-assigned
     * when the restarted reader requests work again.
     */
    @Override
    public void addSplitsBack(List<MqttSplit> splits, int subtaskId) {
        pendingSplits.computeIfAbsent(subtaskId, id -> new ArrayList<>()).addAll(splits);
    }

    @Override
    public void addReader(int subtaskId) {
        // A new reader joined: push any pending splits to it immediately.
        handleSplitRequest(subtaskId, null);
    }

    /**
     * Serializes the enumerator state (pending splits per subtask plus the full
     * split list) into a checkpoint object.
     *
     * @param checkpointId id of the checkpoint being taken
     * @return checkpoint holding the serialized split state
     * @throws Exception if split serialization fails
     */
    @Override
    public MqttEnumeratorCheckpoint snapshotState(long checkpointId) throws Exception {
        Map<Integer, List<byte[]>> serializedPendingSplits = new HashMap<>();
        for (Map.Entry<Integer, List<MqttSplit>> entry : pendingSplits.entrySet()) {
            List<byte[]> serializedSplits = new ArrayList<>();
            for (MqttSplit split : entry.getValue()) {
                serializedSplits.add(split.serialize());
            }
            serializedPendingSplits.put(entry.getKey(), serializedSplits);
        }

        List<byte[]> serializedAllSplits = new ArrayList<>();
        for (MqttSplit split : allSplits) {
            serializedAllSplits.add(split.serialize());
        }

        return new MqttEnumeratorCheckpoint(serializedPendingSplits, serializedAllSplits, checkpointId);
    }

    @Override
    public void close() throws IOException {
        // Release enumerator state; no external resources are held.
        pendingSplits.clear();
        allSplits.clear();
    }

	/**
	 * Handles coordinator events. On {@code TerminateSignalEvent}, tells every
	 * subtask that still has pending splits that no more splits will arrive.
	 */
	@Override
	public void handleOperatorEvent(OperatorEvent evt) {
		if (evt instanceof TerminateSignalEvent) {
			Iterator<Entry<Integer, List<MqttSplit>>> it= pendingSplits.entrySet().iterator();
			while (it.hasNext()) {
				Entry<Integer, List<MqttSplit>> item=it.next();
				int taskId=item.getKey();
				// Bug fix: the original log call had a {} placeholder but no argument,
				// so the task id was never logged.
				LOGGER.info("通知Task({})停止", taskId);
				context.signalNoMoreSplits(taskId);	
			}
        }
	}
}
