package org.egomsl.mw.segment;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.egomsl.mw.CodecUtils;
import org.egomsl.mw.Constants;
import org.egomsl.mw.block.Block;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.Iterator;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

/**
 * Asynchronously flushes immutable segments to segment files.
 *
 * <p>With multiple worker threads, concurrent file-append operations can
 * interleave blocks out of order, so this writer currently must run with a
 * single worker thread (the default constructor).
 */
public class SegmentBlockWriter {
    private static final Logger log = LoggerFactory.getLogger(SegmentBlockWriter.class);

    private final FileSystem fs;
    private final ExecutorService executorService;

    /**
     * Creates a writer with a single flush worker — the safe default, since
     * concurrent appends to one file would reorder blocks (see class note).
     *
     * @param fs target file system
     */
    public SegmentBlockWriter(FileSystem fs) {
        this(fs, 1);
    }

    /**
     * @param fs       target file system
     * @param nWorkers number of flush worker threads; values above 1 risk
     *                 out-of-order appends when flushing to the same file
     */
    public SegmentBlockWriter(FileSystem fs, int nWorkers) {
        this.fs = fs;
        this.executorService = Executors.newFixedThreadPool(nWorkers);
    }

    /**
     * Schedules an asynchronous flush of {@code segment} to {@code filePath}.
     *
     * note:
     *  in normal case
     *       1. Reversed block is not allowed to be appended to existing file.
     *       2. Reversed file is not allowed to append any new block
     *
     *  in compaction case
     *       1. we don't care about the order in segment file, so blocks are appended to one segment file.
     *          Then we read blocks as if it is a reversed segment file.
     * @param segment   segment to be flushed
     * @param filePath  target file path
     * @param append    whether append to an existing (first is first) file.
     *
     * @return a future that completes when the flush is done; it completes
     *         exceptionally (wrapping an {@link UncheckedIOException}) if the
     *         flush fails, so callers blocking on {@code get()} see the error
     */
    public Future<?> flush(ImmutableSegment segment, Path filePath, boolean append) {
        return executorService.submit(() -> {
            log.info("Flush began: {}, append={}, size={}",
                    filePath.toUri(), append, segment.getTotalSize());
            try {
                // Append only when requested AND the file already exists;
                // otherwise (re)create the file and write the meta header.
                boolean appendToExisting = append && fs.exists(filePath);
                // try-with-resources: the stream is always closed, and a
                // failure while opening can no longer NPE in a finally block.
                try (FSDataOutputStream fsdos =
                             appendToExisting ? fs.append(filePath) : fs.create(filePath)) {
                    if (!appendToExisting) {
                        // file meta header: order mask flags a reversed segment file
                        fsdos.writeInt(segment.isReversed() ? Constants.FILE_HEADER_ORDER_MASK : 0);
                    }
                    writeBlocks(segment, fsdos);
                    fsdos.flush();
                }
                log.info("Flush done: {}", filePath.toUri());
            } catch (IOException e) {
                log.error("flush error: ", e);
                // Propagate so the returned Future completes exceptionally
                // instead of reporting a silently "successful" flush.
                throw new UncheckedIOException(e);
            }
        });
    }

    /**
     * Writes every block of {@code segment} to the stream, compressing blocks
     * whose length exceeds the compression threshold.
     *
     * @throws IOException if writing to the stream fails
     */
    private void writeBlocks(ImmutableSegment segment, FSDataOutputStream fsdos) throws IOException {
        Iterator<Block> blockIterator = segment.getBlocks();
        while (blockIterator.hasNext()) {
            Block recordBlock = blockIterator.next();

            if (recordBlock.getLength() > Constants.BLOCK_HEADER_COMP_SIZE) {
                byte[] compressedBlock = CodecUtils.compress(
                        recordBlock.getBytes(), recordBlock.getOffset(),
                        recordBlock.getLength());
                // block header: compression flag byte + compressed payload length
                fsdos.write(Constants.BLOCK_HEADER_COMP_MASK);
                fsdos.writeInt(compressedBlock.length);
                fsdos.write(compressedBlock);
            } else {
                // block header: zero flag byte + raw payload length
                fsdos.write(0);
                fsdos.writeInt(recordBlock.getLength());
                fsdos.write(recordBlock.getBytes(),
                        recordBlock.getOffset(), recordBlock.getLength());
            }
        }
    }

}
