package io.openmessaging.storage;

import io.openmessaging.Conf;
import io.openmessaging.Message;
import io.openmessaging.Topic;
import io.openmessaging.io.FileChannelWrapper;
import io.openmessaging.io.IO;
import io.openmessaging.map.OffsetMap;
import io.openmessaging.map.TopicQueueMap;
import io.openmessaging.utils.*;

import java.nio.ByteBuffer;
import java.util.Enumeration;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;

public class PersistentMemoryStorageFileChannel implements Storage, Runnable {
    // 256 data files in total; one slot is always reserved as the compaction target.
    private IO[] fileChannels = new IO[256];
    private int tmpFileIndex = 255;// slot currently reserved for compaction output (receives live data copied out of a fragmented file)
    private final long fileSize = 60L * 1024 * 1024 * 1024 / 257;// soft cap per file; actual size may overshoot slightly
    private TopicQueueMap<QueueIndex> queues = new TopicQueueMap<>(QueueIndex.class);

    /** Per-(topic, queue) index: offset -> file location, plus the consumer's read cursor. */
    static class QueueIndex {
        OffsetMap<FileIndex> msgIndex = new OffsetMap<>();
        volatile long nextGetOffset;// next offset the consumer will read; anything below it is garbage
    }

    public PersistentMemoryStorageFileChannel() {
        for (int i = 0; i < fileChannels.length; i++) {
            garbageSize[i] = new AtomicLong();
            fileChannels[i] = new FileChannelWrapper(Conf.pmemPath + "/" + i);
        }
        new Thread(this, "pMem").start();// single writer thread: run() drains saveQueue
    }

    private final ConcurrentLinkedQueue<Message> saveQueue = new ConcurrentLinkedQueue<>();
    // O(1) element count of saveQueue; ConcurrentLinkedQueue.size() traverses the whole
    // queue (O(n)) and was previously called on every save().
    private final AtomicLong saveQueueCount = new AtomicLong();

    /**
     * Enqueues a message for asynchronous persistence. Best effort: when the queue is
     * full the message is silently dropped (pmem acts as a cache layer; the message is
     * assumed to be durable elsewhere — see the clone() note below).
     */
    @Override
    public void save(Message m) {
        if (saveQueueCount.get() > 1024 * 1024 / 8)
            return;// backpressure: drop instead of blocking the caller

        m = m.clone();// caller reuses m's buffer after the ssd write returns; pmem may not have persisted it yet
        Statistic.Pmem.saveQueuesSizeAdd(m.data.remaining());
        saveQueue.offer(m);
        saveQueueCount.incrementAndGet();
    }

    ByteBuffer head = ByteBuffer.allocateDirect(9);// message header: topicId(1) queueId(2) offset(4) dataLength(2) = 9 bytes; only touched by the writer thread

    /**
     * Writer loop: drains saveQueue into the current data file, rotating to the next
     * file — or compacting the most fragmented one — once the current file exceeds
     * fileSize.
     */
    @Override
    public void run() {
        try {
            int fileId = 0;
            out:
            while (true) {
                IO file = fileChannels[fileId];
                if (fileChannels[fileId].size() > fileSize) {
                    int nextFileId = nextFileId(fileId);
                    if (fileChannels[nextFileId] == null || fileChannels[nextFileId].size() < fileSize) {
                        fileId = nextFileId;
                    } else {
                        fileId = compact();// all files full: reclaim space from the most fragmented one
                    }
                    continue;
                }
                long writeSize = 0;
                for (Message m = saveQueue.poll(); file.size() < fileSize; m = saveQueue.poll()) {
                    if (m == null) {
                        Thread.sleep(1);
                        continue out;
                    }
                    saveQueueCount.decrementAndGet();
                    QueueIndex queueIndex = queues.computeIfAbsent(m.topic.id, m.queueId, QueueIndex::new);
                    if (m.offset < queueIndex.nextGetOffset)
                        continue;// consumer already passed this offset; storing it would be instant garbage

                    writeSize += m.data.remaining();
                    FileIndex index = new FileIndex((byte) fileId, file.size(), (short) m.data.remaining());

                    head.clear();
                    head.put(m.topic.id).putShort((short) m.queueId).putInt((int) m.offset).putShort((short) m.data.remaining()).flip();
                    file.write(head, m.data);// write data before publishing the index so readers never see a partial record
                    queueIndex.msgIndex.set(m.offset, index);
                }
                Statistic.Pmem.saveQueuesSizeAdd(-writeSize);
                Statistic.Pmem.write(writeSize);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();// restore interrupt status before the writer thread exits
            e.printStackTrace();
        }
    }

    /** Next data-file id after {@code fileId}, wrapping around and skipping the compaction slot. */
    private int nextFileId(int fileId) {
        int next = fileId + 1;
        return next == fileChannels.length ? nextFileId(-1) : next == tmpFileIndex ? nextFileId(next) : next;
    }

    private final AtomicLong[] garbageSize = new AtomicLong[256];// per-file total size of already-consumed (garbage) messages
    private volatile ConcurrentHashMap<Integer, Boolean> compactFileIds = new ConcurrentHashMap<>();// files with enough garbage to be worth compacting

    /**
     * Copies the live records of the most fragmented file into the spare slot, then
     * swaps the two roles. Blocks until some file becomes eligible for compaction.
     *
     * @return the id of the freshly compacted file, which becomes the new write target
     * @throws InterruptedException if interrupted while waiting for an eligible file
     */
    private int compact() throws InterruptedException {
        synchronized (this) {
            // The emptiness check must be atomic with get()'s add-and-notify, otherwise a
            // notification arriving between the check and wait() is lost and this thread
            // could block forever. The loop also guards against spurious wakeups.
            while (compactFileIds.isEmpty()) {
                Log.i("Pmem等待整理碎片");
                wait();
            }
        }
        int maxGarbageSizeFileId = 0;
        long maxGarbageSize = 0;
        Enumeration<Integer> keys = compactFileIds.keys();
        while (keys.hasMoreElements()) {// pick the file with the most reclaimable space
            int i = keys.nextElement();
            long size = garbageSize[i].get();
            if (size > maxGarbageSize && i != tmpFileIndex) {
                maxGarbageSizeFileId = i;
                maxGarbageSize = size;
            }
        }
        compactFileIds.remove(maxGarbageSizeFileId);

        IO oldChannel = fileChannels[maxGarbageSizeFileId];

        IO newChannel = fileChannels[tmpFileIndex];
        if (newChannel != null) {
            newChannel.delete();
        }
        fileChannels[tmpFileIndex] = (newChannel = new FileChannelWrapper(Conf.pmemPath + "/" + tmpFileIndex));
        garbageSize[tmpFileIndex].set(0);

        Cost cost = new Cost().start();
        ByteBuffer buf = ByteBuffer.allocate(17 * 1024 + 9);
        long readPos = 0;
        while (readPos < oldChannel.size()) {
            buf.clear().limit(9);
            readPos += oldChannel.read(buf, readPos);// read header: topicId(1) queueId(2) offset(4) dataLength(2)
            byte topicId = buf.get(0);
            short queueId = buf.getShort(1);
            long offset = buf.getInt(3);
            short dataSize = buf.getShort(7);
            QueueIndex queueIndex = queues.get(topicId, queueId);// expected non-null: run() registers it before writing any record
            FileIndex newIndex = null;
            if (offset >= queueIndex.nextGetOffset) {// still live: copy into the new file
                newIndex = new FileIndex((byte) tmpFileIndex, newChannel.size(), dataSize);
                buf.limit(buf.limit() + dataSize);
                readPos += oldChannel.readAndFlip(buf, readPos);
                newChannel.write(buf);// header + payload copied verbatim
            } else {// garbage: skip the payload
                readPos += dataSize;
                Statistic.Pmem.free(dataSize);
            }
            queueIndex.msgIndex.set(offset, newIndex);// null for garbage -> drops the stale index entry
        }
        cost.pause();
        Log.i("Pmem碎片整理完毕：预计：", Strings.format(maxGarbageSize)
                , " oldId=", maxGarbageSizeFileId, " newId=", tmpFileIndex
                , " 整理前/后", Strings.format(oldChannel.size()), Strings.format(newChannel.size())
                , " 耗时： ", cost);

        int compactedFileIndex = tmpFileIndex;
        tmpFileIndex = maxGarbageSizeFileId;// the drained file becomes the new spare slot
        return compactedFileIndex;

    }

    /**
     * Reads the message at (topic, queueId, offset), or returns null if it is not
     * stored here. Side effect: advances the queue's read cursor to offset + 1,
     * charging every passed record as garbage to the file that holds it, and wakes
     * the compactor when some file crosses the garbage threshold.
     * NOTE(review): nextGetOffset++ on a volatile is not atomic — this assumes a
     * single consumer thread per queue; confirm against callers.
     */
    @Override
    public ByteBuffer get(Topic topic, int queueId, long offset) {
        QueueIndex queueIndex = this.queues.get(topic.id, queueId);
        if (queueIndex == null)
            return null;
        FileIndex index = queueIndex.msgIndex.get(offset);
        if (index == null)
            return null;

        int compactFileIdSize = compactFileIds.size();
        ByteBuffer buf = ByteBuffer.allocate(index.length);
        int fileId = index.fileId & 0xff;
        for (; queueIndex.nextGetOffset <= offset; queueIndex.nextGetOffset++) {
            FileIndex garbageIndex = queueIndex.msgIndex.get(queueIndex.nextGetOffset);
            if (garbageIndex != null) {
                // Fix: charge the garbage to the file that actually holds this record,
                // not to the file of the currently requested message.
                int garbageFileId = garbageIndex.fileId & 0xff;
                long size = garbageSize[garbageFileId].addAndGet(garbageIndex.length);
                if (size > fileSize / 8) {
                    compactFileIds.putIfAbsent(garbageFileId, Boolean.TRUE);
                }
            }
        }
        if (compactFileIdSize == 0 && compactFileIds.size() > 0) {
            Log.i("Pmem通知可以进行碎片整理");
            synchronized (this) {
                notify();
            }
        }
        fileChannels[fileId].readAndFlip(buf, index.position + 9);// skip the 9-byte record header
        Statistic.Pmem.read(buf.remaining());
        return buf;
    }
}
