package com.whz.bus.data;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.connect.runtime.WorkerConfig;
import org.apache.kafka.connect.storage.MemoryOffsetBackingStore;
import org.springframework.util.Assert;
import org.springframework.util.CollectionUtils;
import tech.tongyu.nacos.client.utils.TongyuNacosClientUtils;

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

/**
 * Persists Debezium binlog offsets to Nacos.
 *
 * <p>Extends the in-memory store so that offsets survive restarts: they are loaded
 * from Nacos when the store starts and pushed back to Nacos on every {@link #save()},
 * allowing a restarted connector to resume from the last committed binlog position.
 */
@Slf4j
public class NacosOffsetStorage extends MemoryOffsetBackingStore {

    /** Worker-config key holding the Nacos dataId under which offsets are stored. */
    public static final String OffSET_DATA_ID = "tongyu.debezium.offset.dataId";
    /** Worker-config key holding the Nacos group under which offsets are stored. */
    public static final String OffSET_GROUP = "tongyu.debezium.offset.group";

    private String dataId;
    private String group;

    /**
     * Reads the Nacos coordinates (dataId/group) from the worker configuration.
     *
     * @param config the worker configuration; custom properties live in {@code originals()}
     * @throws IllegalArgumentException if either property is missing or blank
     */
    @Override
    public void configure(WorkerConfig config) {
        super.configure(config);

        // Custom properties live in originals(). Extract null-safely so a missing key
        // fails with the descriptive Assert message below instead of a bare NPE from
        // calling toString() on null.
        Object rawDataId = config.originals().get(OffSET_DATA_ID);
        Object rawGroup = config.originals().get(OffSET_GROUP);
        dataId = (rawDataId != null) ? rawDataId.toString() : null;
        group = (rawGroup != null) ? rawGroup.toString() : null;
        Assert.hasText(dataId, "nacos dataId not empty");
        Assert.hasText(group, "nacos group not empty");
    }

    /** Starts the store and seeds the in-memory offset map from Nacos. */
    @Override
    public synchronized void start() {
        super.start();
        log.info("Starting NacosOffsetStorage with dataId:{},Group:{}", dataId, group);
        load();
    }

    @Override
    public synchronized void stop() {
        super.stop();
        log.info("Stopped NacosOffsetStorage");
    }

    /**
     * Loads the offset map from Nacos into the inherited {@code data} field.
     *
     * <p>Best effort: any failure is logged and the store falls back to an empty
     * offset map (Debezium will then snapshot/start from scratch), matching the
     * original behavior of not propagating load errors to the caller.
     */
    private void load() {
        try {
            // NOTE(review): assumes the nacos client deserializes the config back into
            // a Map<byte[], byte[]> — confirm against TongyuNacosClientUtils.
            Map<byte[], byte[]> raw = TongyuNacosClientUtils.getConfigObject(dataId, group, 10000);
            if (CollectionUtils.isEmpty(raw)) {
                log.info("load meta data from nacos but it's empty.");
                return;
            }
            // Rebuild the ByteBuffer-keyed map expected by MemoryOffsetBackingStore.
            data = new HashMap<>(raw.size());
            for (Map.Entry<byte[], byte[]> mapEntry : raw.entrySet()) {
                ByteBuffer key = (mapEntry.getKey() != null) ? ByteBuffer.wrap(mapEntry.getKey()) : null;
                ByteBuffer value = (mapEntry.getValue() != null) ? ByteBuffer.wrap(mapEntry.getValue()) : null;
                data.put(key, value);
            }
            log.info("load offset data map from nacos.");
        } catch (Exception e) {
            log.error("load offset data map from nacos error.", e);
        }
    }

    /**
     * Publishes the current in-memory offset map to Nacos.
     *
     * <p>Best effort: publish failures are logged, not rethrown, so an offset flush
     * never brings down the connector; the offsets remain in memory and will be
     * retried on the next save.
     */
    @Override
    protected void save() {
        try {
            // Convert ByteBuffer entries back to raw byte arrays for serialization.
            // array() is safe here because the buffers are heap buffers created via
            // ByteBuffer.wrap with zero offset (see load() and the Kafka runtime).
            Map<byte[], byte[]> raw = new HashMap<>(data.size());
            for (Map.Entry<ByteBuffer, ByteBuffer> mapEntry : data.entrySet()) {
                byte[] key = (mapEntry.getKey() != null) ? mapEntry.getKey().array() : null;
                byte[] value = (mapEntry.getValue() != null) ? mapEntry.getValue().array() : null;
                raw.put(key, value);
            }

            boolean published = TongyuNacosClientUtils.publishConfigObject(dataId, group, raw);
            log.info("publish offset data map to nacos:{}", published);
        } catch (Exception e) {
            log.error("publish offset data map to nacos error.", e);
        }
    }
}