package org.egomsl.mw.compaction;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.egomsl.mw.region.FileMeta;
import org.egomsl.mw.region.RegionUtils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
 * A simple size-based compaction policy: selects up to {@code maxFileCount}
 * small segment files (each below a size threshold) for merging, capping the
 * combined size of the selection at {@code maxFileSize}.
 */
public class SimpleCompactionPolicy extends CompactionPolicy {
    // Maximum number of files selected for a single compaction run.
    private final int maxFileCount;
    // Upper bound on the combined size of the selected files, in bytes.
    private final long maxFileSize;
    // Base eligibility threshold: only files smaller than this (possibly
    // scaled up per call, see getFilesForCompaction) are candidates.
    private final long minFileSize;

    public SimpleCompactionPolicy(FileSystem fs, Path regionDir) {
        super(fs, regionDir);

        maxFileCount = 10;
        maxFileSize = 1L << 30;  // 1 GB
        minFileSize = 64L << 20; // 64 MB
    }

    /**
     * Picks the candidate segment files for the next compaction run.
     *
     * @return the selected files (possibly empty), or {@code null} when the
     *         region has fewer than two segment files and compaction is moot
     * @throws IOException if listing the region's segment files fails
     */
    @Override
    List<FileMeta> getFilesForCompaction() throws IOException {
        List<FileMeta> files = RegionUtils.getSegmentFiles(fs, regionDir);
        if (files == null || files.size() < 2) return null;

        // When the region has accumulated many files, relax the eligibility
        // threshold so larger files also become candidates. Compute it into a
        // local so the configured minFileSize is NOT permanently inflated:
        // the original assigned back into the field, compounding the scale
        // factor on every invocation.
        long sizeThreshold = minFileSize;
        if (files.size() > 15) {
            sizeThreshold = minFileSize * (files.size() - 15);
        }

        int i = 0;
        long totalSize = 0;
        List<FileMeta> filesForCompaction = new ArrayList<>();
        while (i < files.size()
                && totalSize < maxFileSize
                && filesForCompaction.size() < maxFileCount) {
            FileMeta file = files.get(i);

            // Skip the first sequential file, which may still be receiving
            // appended blocks.
            if (i == 0 && !file.isReversed()) {
                i++;
                continue;
            }

            if (file.getSize() < sizeThreshold) {
                totalSize += file.getSize();
                filesForCompaction.add(file);
            } else {
                // Files are assumed ordered; once one is too large, stop.
                break;
            }

            i++;
        }

        return filesForCompaction;
    }
}
