package org.egomsl.mw.mapred;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.mapred.*;
import org.egomsl.mw.HadoopRecord;
import org.egomsl.mw.record.Record;

import java.io.IOException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

/*
 * Implements the legacy (org.apache.hadoop.mapred) InputFormat interface,
 * as opposed to the newer org.apache.hadoop.mapreduce API.
 */
public class ABaseInputFormat implements InputFormat<BytesWritable, HadoopRecord> {
    /** JobConf key under which the ABase table directory is stored. */
    public static final String ABASE_TABLE_PATH_KEY = "abase.mapred.input.table.dir";

    /**
     * Records the ABase table directory in the job configuration so that
     * {@link #getSplits(JobConf, int)} can find it at split-computation time.
     *
     * @param conf job configuration to update
     * @param path table directory path (filesystem URI or path string)
     */
    public static void setTablePath(Configuration conf, String path) {
        conf.set(ABASE_TABLE_PATH_KEY, path);
    }

    /**
     * Computes one split per immediate subdirectory of the configured table
     * path. The {@code numSplits} hint is ignored; split count is driven
     * entirely by the directory layout.
     *
     * @param job       job configuration; must carry {@link #ABASE_TABLE_PATH_KEY}
     * @param numSplits desired split count hint (unused)
     * @return one {@link ABaseInputSplit} per subdirectory of the table path
     * @throws IOException if the table path is not configured or the
     *                     filesystem listing fails
     */
    @Override
    public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        String inputPath = job.get(ABASE_TABLE_PATH_KEY);
        // Fail fast with a clear message instead of letting new Path(null) NPE.
        if (inputPath == null || inputPath.isEmpty()) {
            throw new IOException(
                    "Input table path is not set; call ABaseInputFormat.setTablePath() or set "
                            + ABASE_TABLE_PATH_KEY);
        }

        Path input = new Path(inputPath);
        FileSystem fs = input.getFileSystem(job);
        FileStatus[] statuses = fs.listStatus(input);

        // Presized ArrayList: we know the upper bound and only ever append.
        List<InputSplit> splits = new ArrayList<>(statuses.length);
        for (FileStatus status : statuses) {
            // Plain files directly under the table path are skipped; each
            // subdirectory becomes exactly one split.
            if (status.isDirectory()) {
                splits.add(new ABaseInputSplit(status.getPath().toUri().toString()));
            }
        }

        return splits.toArray(new InputSplit[0]);
    }

    /**
     * Creates and initializes a reader for the given split.
     *
     * @param split    split produced by {@link #getSplits(JobConf, int)}
     * @param job      job configuration
     * @param reporter progress reporter (unused here)
     * @return an initialized {@link ABaseRecordReader}
     * @throws IOException if reader initialization fails
     */
    @Override
    public RecordReader<BytesWritable, HadoopRecord> getRecordReader(
            InputSplit split, JobConf job, Reporter reporter) throws IOException {

        ABaseRecordReader recordReader = new ABaseRecordReader();
        recordReader.initialize(split, job);

        return recordReader;
    }
}
