package org.egomsl.mw.compaction;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.egomsl.mw.record.Record;
import org.egomsl.mw.record.RecordScanner;
import org.egomsl.mw.region.FileMeta;
import org.egomsl.mw.region.RegionReader;
import org.egomsl.mw.segment.MutableSegment;
import org.egomsl.mw.segment.SegmentBlockWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class RegionCompactor implements Runnable {
    private static final Logger log = LoggerFactory.getLogger(RegionCompactor.class);

    protected FileSystem fs;
    protected Path regionDir;
    protected Path tmpFilePath;

    // Shared writer used for flushing segment snapshots; created once per compactor.
    protected SegmentBlockWriter segmentBlockWriter;
    protected CompactionPolicy policy;

    /**
     * Creates a compactor for a single region directory and ensures the region's
     * {@code tmp} sub-directory (staging area for compacted output) exists.
     *
     * @param fs        filesystem the region lives on
     * @param regionDir root directory of the region
     * @param policy    policy that selects which files to compact
     */
    public RegionCompactor(FileSystem fs, Path regionDir, CompactionPolicy policy) {
        this.fs = fs;
        this.regionDir = regionDir;
        this.tmpFilePath = new Path(regionDir, "tmp");
        this.segmentBlockWriter = new SegmentBlockWriter(fs);

        this.policy = policy;

        try {
            if (!fs.exists(tmpFilePath)) {
                fs.mkdirs(tmpFilePath);
            }
        } catch (IOException e) {
            // Keep the exception: without the stack trace a mkdir failure is undiagnosable.
            // NOTE(review): a compactor without its tmp dir cannot stage output; consider
            // rethrowing instead of only logging — confirm with callers.
            log.error("Error creating tmp path: {}", tmpFilePath, e);
        }
    }

    /**
     * Merges the given files into a single compacted file. Records are streamed
     * through an in-memory segment and flushed asynchronously to a staging file
     * under {@code tmp/}; only after every flush succeeds are the input files
     * deleted and the staged file renamed into place.
     *
     * @param files files to compact, in the order provided by the policy; the
     *              compacted output takes the name of the last entry
     * @return {@code true} when compaction completed and was applied (or there was
     *         nothing to do); {@code false} when a flush or the final rename
     *         failed — in that case the input files are left untouched
     * @throws IOException          on filesystem errors
     * @throws InterruptedException if interrupted while waiting for flush tasks
     */
    public boolean compact(List<FileMeta> files) throws IOException, InterruptedException {
        if (files == null || files.isEmpty()) return true;

        // Output reuses the name of the newest input file; it is staged under tmp/
        // and only renamed into place once all data has been flushed.
        Path compactedFilePath = files.get(files.size() - 1).getPath();
        String tmpFileName = compactedFilePath.getName();
        Path tmpCompactedFilePath = new Path(tmpFilePath, tmpFileName);

        MutableSegment segment = new MutableSegment(2, 1 << 20);

        RegionReader regionReader = new RegionReader(fs, regionDir, files);
        RecordScanner recordScanner = regionReader.getScanner(true);

        List<Future<?>> flushTasks = new ArrayList<>();

        // Use the shared writer field; the original created a second, shadowing
        // SegmentBlockWriter here, leaving the constructor-initialized one dead.
        while (recordScanner.hashNext()) {
            Record record = recordScanner.next();
            if (!segment.add(record)) {
                // Segment is full: flush a snapshot asynchronously.
                // NOTE(review): the record that failed to be added is not retried
                // after the flush — confirm MutableSegment.add/snapshot semantics;
                // as written this record may be silently dropped.
                flushTasks.add(
                        segmentBlockWriter.flush(segment.snapshot(), tmpCompactedFilePath, true)
                );
            }
        }
        // Flush whatever remains in the segment.
        flushTasks.add(
                segmentBlockWriter.flush(segment.snapshot(), tmpCompactedFilePath, true)
        );

        // Wait for all flushes. If any failed we must NOT delete the inputs —
        // the original proceeded regardless, which loses data on a flush error.
        boolean flushFailed = false;
        for (Future<?> task : flushTasks) {
            try {
                task.get();
            } catch (ExecutionException e) {
                log.error("Flush error: ", e);
                flushFailed = true;
            }
        }
        if (flushFailed) {
            log.error("Aborting compaction of {}; input files left in place.", regionDir);
            // Best-effort cleanup of the partially written staging file.
            fs.delete(tmpCompactedFilePath, false);
            return false;
        }

        log.info("Apply compaction ...");
        for (FileMeta file : files) {
            log.info("delete file: {}", file.getPath());
            fs.delete(file.getPath(), false);
        }
        log.info("rename file {} to {}", tmpCompactedFilePath, compactedFilePath);
        // FileSystem.rename reports failure via its return value; surface it
        // instead of silently declaring the compaction done.
        if (!fs.rename(tmpCompactedFilePath, compactedFilePath)) {
            log.error("Failed to rename {} to {}", tmpCompactedFilePath, compactedFilePath);
            return false;
        }
        log.info("Compaction done.");

        return true;
    }

    /**
     * Runs one compaction round: asks the policy for candidate files and compacts
     * them. Synchronized so overlapping scheduler ticks cannot run two compactions
     * on the same region concurrently.
     */
    @Override
    public synchronized void run() {
        try {
            List<FileMeta> files = policy.getFilesForCompaction();
            if (files != null) {
                compact(files);
            }
        } catch (IOException e) {
            log.error("Compaction failed: ", e);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so the owning executor/scheduler sees it.
            Thread.currentThread().interrupt();
            log.error("Compaction interrupted: ", e);
        }
    }
}
