package com.kingsoft.minibase;

import java.io.*;
import java.security.Key;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.zip.CRC32;
import java.util.zip.Checksum;

public class DiskFile {
    private static final int BLOCK_SIZE_UP_LIMIT = 1024 *1024 * 2;
    public static final int BLOOM_FILTER_HASH_COUNT = 3;
    public static final int BLOOM_FILTER_BITS_PER_KEY = 10;
    public static final long DISK_FILE_MAGIC = 0xFAC881234221FFA9L;

    // fileSize(8B) + blockCount(4B) + blockIndexOffset(8B) + blockIndexSize(8B)
    // + DISK_FILE_MAGIC(8B)
    public static final int TRAILER_SIZE = 8 + 4 + 8 + 8 + 8;
    private String fileName;

    private long fileSize;
    private int blockCount;
    private long blockIndexOffset;
    private long blockIndexSize;
    private RandomAccessFile in;
    // 所有数据块的索引信息
    private SortedSet<BlockMeta> blockMetaSortedSet = new TreeSet<>();


    private String fname;

    // Index entry for a single data block: the block's last KeyValue, its file
    // offset and size, and a bloom filter over the block's keys. A DiskFile's
    // single index block holds one BlockMeta per data block.
    //
    // Implements Comparable because instances are kept in a TreeSet (see
    // blockMetaSortedSet); without it TreeSet.add throws ClassCastException.
    public static class BlockMeta implements Comparable<BlockMeta> {
        private static final int OFFSET_SIZE = 8;
        private static final int SIZE_SIZE = 8;
        private static final int BF_LEN_SIZE = 4;
        private KeyValue lastKeyValue;
        private long blockOffset = 0;
        private long blockSize = 0;
        private byte[] bloomFilter;

        public BlockMeta(KeyValue lastKeyValue, long blockOffset, long blockSize, byte[] bloomFilter) {
            this.blockOffset = blockOffset;
            this.blockSize = blockSize;
            this.lastKeyValue = lastKeyValue;
            this.bloomFilter = bloomFilter;
        }

        public KeyValue getLastKV() {
            return this.lastKeyValue;
        }

        public long getBlockOffset() {
            return this.blockOffset;
        }

        public long getBlockSize() {
            return this.blockSize;
        }

        public byte[] getBloomFilter() {
            return this.bloomFilter;
        }

        // Number of bytes toBytes() will produce for this entry.
        public int getSerializeSize() {
            return lastKeyValue.getSerializeSize() + OFFSET_SIZE + SIZE_SIZE + BF_LEN_SIZE + bloomFilter.length;
        }

        /**
         * Orders metas by their position in the file. Blocks are appended in
         * key order, so offset order matches key order; comparing offsets
         * avoids depending on KeyValue being Comparable.
         * NOTE(review): if key-based index lookups are added later, confirm
         * KeyValue's ordering and compare on lastKeyValue instead.
         */
        @Override
        public int compareTo(BlockMeta other) {
            return Long.compare(this.blockOffset, other.blockOffset);
        }

        /**
         * Serializes this entry as:
         * lastKeyValue | blockOffset(8B) | blockSize(8B) | bfLen(4B) | bloomFilter.
         *
         * @throws IOException if the encoded size disagrees with getSerializeSize()
         */
        public byte[] toBytes() throws IOException {
            byte[] bytes = new byte[getSerializeSize()];
            int pos = 0;

            // encode lastKeyValue
            byte[] lastKeyValueBytes = lastKeyValue.toBytes();
            System.arraycopy(lastKeyValueBytes, 0, bytes, pos, lastKeyValueBytes.length);
            pos += lastKeyValueBytes.length;

            // encode blockOffset
            byte[] blockOffsetBytes = Bytes.toBytes(blockOffset);
            System.arraycopy(blockOffsetBytes, 0, bytes, pos, blockOffsetBytes.length);
            pos += blockOffsetBytes.length;

            // encode blockSize
            byte[] blockSizeBytes = Bytes.toBytes(blockSize);
            System.arraycopy(blockSizeBytes, 0, bytes, pos, blockSizeBytes.length);
            pos += blockSizeBytes.length;

            // encode length of bloomFilter
            byte[] blockFilterLenBytes = Bytes.toBytes(bloomFilter.length);
            System.arraycopy(blockFilterLenBytes, 0, bytes, pos, blockFilterLenBytes.length);
            pos += blockFilterLenBytes.length;

            // encode bloomFilter
            System.arraycopy(bloomFilter, 0, bytes, pos, bloomFilter.length);
            pos += bloomFilter.length;

            if (pos != bytes.length) {
                throw new IOException(
                        "pos(" + pos + ") should be equal to length of bytes (" + bytes.length + ")");
            }
            return bytes;
        }

        /**
         * Decodes one BlockMeta from the buffer starting at offset, the exact
         * inverse of toBytes(). The KeyValue part is delegated to
         * KeyValue.parseFrom.
         */
        public static BlockMeta parseFrom(byte[] buffer, int offset) throws IOException {
            // decode lastKV
            KeyValue keyValue = KeyValue.parseFrom(buffer, offset);
            offset += keyValue.getSerializeSize();

            // decode blockOffset
            long blockOffset = Bytes.toLong(Bytes.slice(buffer, offset, OFFSET_SIZE));
            offset += OFFSET_SIZE;

            // decode blockSize
            long blockSize = Bytes.toLong(Bytes.slice(buffer, offset, SIZE_SIZE));
            offset += SIZE_SIZE;

            // decode bloomFilter length
            int bfSize = Bytes.toInt(Bytes.slice(buffer, offset, BF_LEN_SIZE));
            offset += BF_LEN_SIZE;

            // decode bloomFilter
            byte[] bloomFilter = new byte[bfSize];
            System.arraycopy(buffer, offset, bloomFilter, 0, bloomFilter.length);
            offset += bfSize;

            assert offset <= buffer.length;
            return new BlockMeta(keyValue, blockOffset, blockSize, bloomFilter);
        }
    }


    /**
     * Streams a DiskFile to disk in three phases: append() for data blocks,
     * appendIndex() for the single index block, then appendTrailer().
     *
     * BUG FIX: appendTrailer previously never wrote DISK_FILE_MAGIC, even
     * though TRAILER_SIZE budgets 8 bytes for it and open() reads/verifies it
     * — every written file was 8 bytes short and failed to open.
     */
    public static class DiskFileWriter implements Closeable {
        private String fileName;
        private long currentOffset;
        private BlockWriter currentWriter;
        private BlockIndexWriter indexWriter;
        private FileOutputStream out;
        private long fileSize = 0;
        private int blockCount = 0;
        private long blockIndexOffset = 0;
        private long blockIndexSize = 0;

        public DiskFileWriter(String fileName) throws IOException {
            this.fileName = fileName;
            File f = new File(fileName);
            f.createNewFile();
            out = new FileOutputStream(f);
            currentOffset = 0;
            currentWriter = new BlockWriter();
            indexWriter = new BlockIndexWriter();
        }

        /**
         * Buffers one KeyValue into the current data block, flushing the block
         * first if adding the KV would exceed BLOCK_SIZE_UP_LIMIT.
         */
        public void append(KeyValue kv) throws IOException {
            if (kv == null) return;
            assert kv.getSerializeSize() + BlockWriter.KV_SIZE_LEN + BlockWriter.CHECKSUM_LEN < BLOCK_SIZE_UP_LIMIT;
            // When the current block is nearly full, flush it and record its
            // metadata in the index before starting a new block.
            if (currentWriter.getKeyValueCount() > 0
                && (kv.getSerializeSize() + currentWriter.size()) > BLOCK_SIZE_UP_LIMIT) {
                switchNextBlockWriter();
            }
            // Otherwise keep writing into the current block.
            currentWriter.append(kv);
        }

        // Serializes the current block to the output stream, appends its meta
        // (lastKV, offset, size, bloomFilter) to the index writer, updates the
        // running offset/block count, and starts a fresh block.
        private void switchNextBlockWriter() throws IOException {
            assert currentWriter.getLastKV() != null;
            byte[] buffer = currentWriter.serialize();
            out.write(buffer);
            indexWriter.append(currentWriter.getLastKV(), currentOffset, buffer.length, currentWriter.getBloomFilter());
            currentOffset += buffer.length;
            blockCount += 1;
            currentWriter = new BlockWriter();
        }

        /**
         * Flushes any partially-filled final data block, then writes the index
         * block and records its offset/size for the trailer.
         */
        public void appendIndex() throws IOException {
            // Flush the last (possibly partial) data block.
            if (currentWriter.getKeyValueCount() > 0) {
                switchNextBlockWriter();
            }
            byte[] buffer = indexWriter.serialize();
            // Remember where the index lives for the trailer.
            blockIndexOffset = currentOffset;
            blockIndexSize = buffer.length;

            out.write(buffer);
            currentOffset += buffer.length;
        }

        /**
         * Writes the fixed-size trailer:
         * fileSize(8B) | blockCount(4B) | blockIndexOffset(8B) |
         * blockIndexSize(8B) | DISK_FILE_MAGIC(8B).
         */
        public void appendTrailer() throws IOException {
            // fileSize includes the trailer itself.
            fileSize = currentOffset + TRAILER_SIZE;

            byte[] buffer = Bytes.toBytes(fileSize);
            out.write(buffer);

            buffer = Bytes.toBytes(blockCount);
            out.write(buffer);

            buffer = Bytes.toBytes(blockIndexOffset);
            out.write(buffer);

            buffer = Bytes.toBytes(blockIndexSize);
            out.write(buffer);

            // BUG FIX: the magic number was missing; open() verifies it.
            buffer = Bytes.toBytes(DISK_FILE_MAGIC);
            out.write(buffer);
        }

        @Override
        public void close() throws IOException {
            if (out != null) {
                try {
                    out.flush();
                    // fsync so the data survives a crash, not just a JVM exit.
                    FileDescriptor fd = out.getFD();
                    fd.sync();
                } finally {
                    out.close();
                }
            }
        }
    }

    /**
     * Opens an existing DiskFile for reading: parses the trailer, validates
     * the magic number, and loads every BlockMeta from the index block into
     * blockMetaSortedSet. Called by diskStore on startup so the file's
     * metadata is available in memory.
     *
     * BUG FIXES vs. previous version: fileSize was asserted against the
     * never-assigned field instead of being read; the index bytes were never
     * actually read from the file before parsing; short reads from read()
     * were not handled (now readFully).
     *
     * @param fileName path of the DiskFile to open
     * @throws IOException on read failure or truncated file
     */
    public void open(String fileName) throws IOException {
        this.fileName = fileName;
        File file = new File(fileName);
        in = new RandomAccessFile(file, "r");

        // Seek to the fixed-size trailer at the end of the file.
        long fLen = file.length();
        assert fLen > TRAILER_SIZE;
        in.seek(fLen - TRAILER_SIZE);

        // Trailer layout: fileSize(8B) | blockCount(4B) | blockIndexOffset(8B)
        // | blockIndexSize(8B) | magic(8B). readFully advances the pointer
        // and throws EOFException on truncation instead of short-reading.
        byte[] buffer = new byte[8];
        in.readFully(buffer);
        this.fileSize = Bytes.toLong(buffer);
        assert this.fileSize == fLen;

        buffer = new byte[4];
        in.readFully(buffer);
        this.blockCount = Bytes.toInt(buffer);

        buffer = new byte[8];
        in.readFully(buffer);
        this.blockIndexOffset = Bytes.toLong(buffer);

        buffer = new byte[8];
        in.readFully(buffer);
        this.blockIndexSize = Bytes.toLong(buffer);

        buffer = new byte[8];
        in.readFully(buffer);
        assert DISK_FILE_MAGIC == Bytes.toLong(buffer);

        // Read the whole index block, then decode its BlockMetas one by one;
        // each entry's serialized size tells us where the next one starts.
        buffer = new byte[(int) blockIndexSize];
        in.seek(blockIndexOffset);
        in.readFully(buffer);

        int offset = 0;
        while (offset < buffer.length) {
            BlockMeta blockMeta = BlockMeta.parseFrom(buffer, offset);
            offset += blockMeta.getSerializeSize();
            blockMetaSortedSet.add(blockMeta);
        }

        assert blockMetaSortedSet.size() == this.blockCount : "blockMetaSet.getSerializeSize:" + blockMetaSortedSet.size()
                + ", blockCount: " + blockCount;
    }

    /**
     * Accumulates BlockMeta entries and serializes them back-to-back as the
     * file's single index block.
     *
     * BUG FIX: serialize() previously never advanced pos, so every entry was
     * copied over offset 0 and the final size assertion failed for any index
     * with at least one entry.
     */
    public static class BlockIndexWriter {
        private List<BlockMeta> blockMetas = new ArrayList<>();
        private int totalBytes = 0;

        // Records the metadata of one finished data block.
        public void append(KeyValue keyValue, long blockOffset, int blockSize, byte[] bloomFilter) {
            BlockMeta blockMeta = new BlockMeta(keyValue, blockOffset, blockSize, bloomFilter);
            blockMetas.add(blockMeta);
            totalBytes += blockMeta.getSerializeSize();
        }

        // Concatenates every BlockMeta's encoding into a single byte array.
        public byte[] serialize() throws IOException {
            byte[] bytes = new byte[totalBytes];
            int pos = 0;
            for (int i = 0; i < blockMetas.size(); i++) {
                byte[] buffer = blockMetas.get(i).toBytes();
                System.arraycopy(buffer, 0, bytes, pos, buffer.length);
                pos += buffer.length; // BUG FIX: was missing
            }
            assert pos == bytes.length;
            return bytes;
        }

    }

    /**
     * Buffers KeyValues for one data block and serializes them as:
     * kvCount(4B) | all KVs | crc32 checksum(4B).
     *
     * BUG FIXES: the class had no constructor, so kvBuf/crc32/bloomFilter were
     * null and the first append() threw NPE; getBloomFilter() computed each
     * key but never stored it into the array, passing nulls to generate().
     */
    public static class BlockWriter {
        private static final int KV_SIZE_LEN = 4;
        private static final int CHECKSUM_LEN = 4;
        // Total serialized size of the buffered KVs (excluding header/checksum).
        private int totalSize;
        private List<KeyValue> kvBuf;
        private KeyValue lastKV;
        private BloomFilter bloomFilter;
        private Checksum crc32;
        private int kvCount;

        public BlockWriter() {
            totalSize = 0;
            kvBuf = new ArrayList<>();
            // NOTE(review): assumes BloomFilter(hashCount, bitsPerKey) ctor —
            // the two class-level constants suggest this; confirm signature.
            bloomFilter = new BloomFilter(BLOOM_FILTER_HASH_COUNT, BLOOM_FILTER_BITS_PER_KEY);
            crc32 = new CRC32();
            kvCount = 0;
            lastKV = null;
        }

        public int getChecksum() {
            return (int) crc32.getValue();
        }

        public KeyValue getLastKV() {
            return this.lastKV;
        }

        // Size of the serialized block: count header + KVs + checksum.
        public int size() {
            return KV_SIZE_LEN + totalSize + CHECKSUM_LEN;
        }

        public int getKeyValueCount() {
            return kvCount;
        }

        /**
         * Buffers one KeyValue, updating lastKV, the running checksum, the
         * total serialized size, and the KV count.
         */
        public void append(KeyValue kv) throws IOException {
            lastKV = kv;
            kvBuf.add(kv);
            byte[] buf = kv.toBytes();
            crc32.update(buf, 0, buf.length);
            totalSize += kv.getSerializeSize();
            kvCount += 1;
        }

        /**
         * Builds the bloom filter bytes for this block. Only the keys (not
         * values) are fed to the filter.
         */
        public byte[] getBloomFilter() {
            byte[][] bytes = new byte[kvBuf.size()][];
            for (int i = 0; i < bytes.length; i++) {
                // BUG FIX: the key was computed but never stored.
                bytes[i] = kvBuf.get(i).getKey();
            }
            return bloomFilter.generate(bytes);
        }

        // Encodes the block: kvCount(4B) | each KV's bytes | checksum(4B).
        public byte[] serialize() {
            byte[] buffer = new byte[size()];
            int pos = 0;

            // append KV count
            byte[] kvSize = Bytes.toBytes(kvBuf.size());
            System.arraycopy(kvSize, 0, buffer, pos, kvSize.length);
            pos += kvSize.length;

            // append all KeyValues
            for (int i = 0; i < kvBuf.size(); i++) {
                byte[] bytes = kvBuf.get(i).toBytes();
                System.arraycopy(bytes, 0, buffer, pos, bytes.length);
                pos += bytes.length;
            }

            // append checksum
            byte[] checksum = Bytes.toBytes(getChecksum());
            System.arraycopy(checksum, 0, buffer, pos, checksum.length);
            pos += checksum.length;

            assert pos == size();
            return buffer;
        }

    }
}
