package org.egomsl.mw.mapreduce;

import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapreduce.*;
import org.egomsl.mw.HadoopRecord;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

/*
    new hadoop api using abstract InputFormat class
 */
/**
 * InputFormat (new Hadoop MapReduce API) for ABase tables: produces one
 * {@link InputSplit} per immediate sub-directory of a configured table root.
 *
 * <p>Configure the table root via {@link #setTablePath(JobContext, String)}
 * before submitting the job.
 */
public class ABaseInputFormat extends InputFormat<BytesWritable, HadoopRecord> {

    /** Configuration key under which the table's root directory is stored. */
    public static final String ABASE_TABLE_PATH_KEY = "abase.mapreduce.input.table.inputdir";

    /**
     * Lists the configured table root and returns one split per immediate
     * sub-directory. Plain files directly under the root are ignored.
     *
     * @param jobContext job context carrying the configuration
     * @return one {@link ABaseInputSplit} per sub-directory (possibly empty)
     * @throws IOException if the table path was never configured, or listing fails
     */
    @Override
    public List<InputSplit> getSplits(JobContext jobContext)
            throws IOException, InterruptedException {
        String inputPath = jobContext.getConfiguration().get(ABASE_TABLE_PATH_KEY);
        // Fail fast with a clear message instead of letting new Path(null)
        // blow up with an opaque IllegalArgumentException.
        if (inputPath == null || inputPath.isEmpty()) {
            throw new IOException("Input table path is not set; call "
                    + "ABaseInputFormat.setTablePath() or set " + ABASE_TABLE_PATH_KEY);
        }

        Path input = new Path(inputPath);
        FileSystem fs = input.getFileSystem(jobContext.getConfiguration());

        FileStatus[] statuses = fs.listStatus(input);

        List<InputSplit> splits = new ArrayList<>(statuses.length);
        for (FileStatus status : statuses) {
            if (status.isDirectory()) {
                splits.add(new ABaseInputSplit(status.getPath().toUri().toString()));
            }
        }

        return splits;
    }

    /**
     * Creates an uninitialized record reader for a single split; the
     * framework initializes it before use.
     */
    @Override
    public RecordReader<BytesWritable, HadoopRecord> createRecordReader(
            InputSplit inputSplit, TaskAttemptContext taskAttemptContext)
            throws IOException, InterruptedException {
        return new ABaseRecordReader();
    }

    /**
     * Stores the table root directory in the job configuration for this format.
     *
     * @param jobContext job whose configuration receives the path
     * @param path filesystem path of the ABase table root directory
     */
    public static void setTablePath(JobContext jobContext, String path) {
        jobContext.getConfiguration().set(ABASE_TABLE_PATH_KEY, path);
    }
}
