package org.egomsl.mw.table;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.egomsl.mw.record.Record;
import org.egomsl.mw.region.RegionWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Future;

/**
 * Writes {@link Record}s into a partitioned table by routing each record to
 * the {@link RegionWriter} that owns the partition of its key's hash.
 *
 * <p>NOTE(review): not thread-safe — confirm callers use a single writer
 * thread, as the underlying region writers appear to flush asynchronously.
 */
public class TableWriter {
    private static final Logger log = LoggerFactory.getLogger(TableWriter.class);

    private final FileSystem fs;
    private final TableDesc tableDesc;

    // One writer per partition directory; indexed by partition index.
    private final ArrayList<RegionWriter> regionWriters;


    /**
     * Creates a table writer with one {@link RegionWriter} per partition
     * directory, using the region writers' default settings.
     *
     * @param fs        file system holding the table's region directories
     * @param tableDesc table descriptor providing the partition directories
     *                  and partition layout
     * @throws IOException if a region writer cannot be created
     */
    public TableWriter(FileSystem fs, TableDesc tableDesc) throws IOException {
        this.fs = fs;
        this.tableDesc = tableDesc;

        this.regionWriters = new ArrayList<>(tableDesc.getPartitionDirs().size());
        for (Path regionDir : tableDesc.getPartitionDirs()) {
            this.regionWriters.add(new RegionWriter(fs, regionDir));
        }
    }

    /**
     * Creates a table writer with explicit region-writer buffering settings.
     *
     * @param fs        file system holding the table's region directories
     * @param tableDesc table descriptor providing the partition directories
     *                  and partition layout
     * @param maxBlock  passed through to {@link RegionWriter} — presumably the
     *                  maximum number of buffered blocks; verify against RegionWriter
     * @param blockSize passed through to {@link RegionWriter} — presumably the
     *                  block size in bytes; verify against RegionWriter
     * @throws IOException if a region writer cannot be created
     */
    public TableWriter(FileSystem fs, TableDesc tableDesc, int maxBlock, int blockSize) throws IOException {
        this.fs = fs;
        this.tableDesc = tableDesc;

        this.regionWriters = new ArrayList<>(tableDesc.getPartitionDirs().size());
        for (Path regionDir : tableDesc.getPartitionDirs()) {
            this.regionWriters.add(new RegionWriter(fs, regionDir, maxBlock, blockSize));
        }
    }

    /**
     * Routes a record to the region writer owning its key's hash partition.
     * {@code null} records are silently ignored.
     *
     * @param record the record to write; ignored if {@code null}
     */
    public void add(Record record) {
        if (record == null) return;

        int hashCode = record.getKey().hashCode();
        int partitionIndex = findPartition(tableDesc.getPartions(),
                tableDesc.getNumVirtualNode(), hashCode);
        regionWriters.get(partitionIndex).add(record);
    }

    /**
     * Flushes every region writer and blocks until all of their asynchronous
     * segment writes have completed.
     *
     * <p>TODO: handle failures part-way through the flush — a failed future is
     * currently only logged, not propagated to the caller.
     */
    public void flush() {
        // Start all flushes first so they can proceed concurrently...
        List<Future<?>> futures = new ArrayList<>(regionWriters.size());
        for (RegionWriter writer : regionWriters) {
            writer.flush();
            futures.add(writer.getSegmentWriterFuture());
        }
        // ...then wait for each one to finish.
        for (Future<?> future : futures) {
            try {
                future.get();
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe it.
                Thread.currentThread().interrupt();
                log.error("Interrupted while waiting for flush done. ", e);
            } catch (Exception e) {
                log.error("Error when waiting for flush done. ", e);
            }
        }
    }

    /**
     * Flushes all pending data.
     *
     * <p>NOTE(review): this only flushes — it does not release the underlying
     * region writers; confirm {@link RegionWriter} requires no explicit close.
     */
    public void close() {
        flush();
    }

    /**
     * Locates the partition that owns {@code hashCode}.
     *
     * <p>The raw hash is first mapped onto the virtual-node ring
     * {@code [0, numVirtualNode)} (correcting negative Java {@code %} results),
     * then binary-searched in {@code partitions}.
     *
     * @param partitions     ascending partition start points on the ring;
     *                       {@code partitions[0]} is expected to be no greater
     *                       than any mapped hash (typically 0) — otherwise -1
     *                       is returned
     * @param numVirtualNode size of the virtual-node ring; must be positive
     * @param hashCode       raw hash of the record key
     * @return index such that partitions[index] &lt;= mappedHash &lt; partitions[index+1]
     */
    public static int findPartition(int[] partitions, int numVirtualNode, int hashCode) {
        // Normalize into [0, numVirtualNode). Java's % yields a result in
        // (-numVirtualNode, numVirtualNode), so one correction is enough.
        hashCode = hashCode % numVirtualNode;
        if (hashCode < 0) {
            hashCode += numVirtualNode;
        }

        int targetPartitionIndex = Arrays.binarySearch(partitions, hashCode);

        // Exact hit: the mapped hash is itself a partition start point.
        if (targetPartitionIndex >= 0) return targetPartitionIndex;
        // Miss: binarySearch returns -(insertionPoint) - 1; the owning
        // partition is the one just before the insertion point.
        return -(targetPartitionIndex + 2);
    }
}
