/**
 * 
 */
package mapreduce4j;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

/**
 * An input format is required for each type of input file, and will
 * typically come with a record reader.
 * 
 * MapReduce4J differs from Hadoop here as an input format is required to 
 * offer the ability to read from an arbitrary byte stream and report the bytes
 * read to get to the next record split.  This means that MapReduce4J will not
 * generate the splits into memory, but instead allows the split offsets to be 
 * identified in the input files, such that the input files can be split at record
 * boundaries and passed into the mappers.  A record reader still provides the 
 * record-oriented view of the data.
 *  
 * @param <K> the type of keys produced by the record reader
 * @param <V> the type of values produced by the record reader
 * @author tim
 */
public abstract class InputFormat<K, V> {
	
	/**
	 * Reports how many bytes must be skipped from {@code startByte} to reach
	 * the next record boundary in the channel, so that splits can be aligned
	 * to record boundaries without materializing them in memory.
	 * 
	 * @param fcin the channel over the input file to scan
	 * @param startByte the byte offset in the channel at which to start scanning
	 * @return the number of bytes from {@code startByte} to the next record boundary
	 * @throws IOException if the channel cannot be read
	 */
	public abstract long bytesUntilRecord(FileChannel fcin, long startByte) throws IOException;
	
	/**
	 * Creates a record reader that presents a record-oriented view of the
	 * bytes in the supplied buffer.
	 * 
	 * @param byteBuffer the buffer containing one split of the input data
	 * @return a reader yielding key/value records parsed from the buffer
	 */
	public abstract RecordReader<K, V> createRecordReader(ByteBuffer byteBuffer);
}
