/**
 * 
 */
package mapreduce4j;

import java.io.File;
import java.nio.ByteBuffer;

/**
 * A record reader is required to generate the key and values for a record, and also handle moving through 
 * a byte stream.  
 * Note: This differs from the Hadoop implementation as Hadoop will generate InputSplits before feeding into the 
 * reader.  Here, we don't pre split the data, but just stream it and read as we stream.
 * 
 * <p>Expected usage protocol: call {@link #initialize(ByteBuffer)} once with the input buffer,
 * then repeatedly call {@link #nextKeyValue()}; each time it returns {@code true}, read the
 * current record via {@link #getCurrentKey()} and {@link #getCurrentValue()}.
 * 
 * @param <KEY_IN> the type of key produced for each record
 * @param <VALUE_IN> the type of value produced for each record
 * 
 * @author tim
 */
public abstract class RecordReader<KEY_IN, VALUE_IN> {
	
	/**
	 * Returns the key of the record most recently advanced to by {@link #nextKeyValue()}.
	 * Behaviour before the first successful {@code nextKeyValue()} call is implementation-defined.
	 * 
	 * @return The key from the current record
	 */
	public abstract KEY_IN getCurrentKey();
	
	/**
	 * Returns the value of the record most recently advanced to by {@link #nextKeyValue()}.
	 * Behaviour before the first successful {@code nextKeyValue()} call is implementation-defined.
	 * 
	 * @return The value from the current record
	 */
	public abstract VALUE_IN getCurrentValue();
	
	/**
	 * Prepares this reader to consume records from the given byte stream.
	 * Must be called before the first {@link #nextKeyValue()} call.
	 * 
	 * @param input buffer containing the raw bytes to read records from
	 *              (NOTE: earlier doc said "File to read" — the signature takes a ByteBuffer;
	 *              the unused {@code java.io.File} import appears to be a leftover)
	 */
	public abstract void initialize(ByteBuffer input);
	
	/**
	 * Advances this reader to the next record in the stream, if any.
	 * 
	 * @return true if there was another key/value to read, otherwise false
	 */
	public abstract boolean nextKeyValue();
}
