/**
 * 
 */
package mapreduce4j;

import java.nio.ByteBuffer;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

/**
 * @author tim
 */
public class LineRecordReader extends RecordReader<LongWritable, Text> {
	/** Read-only view of the input chunk; consumed byte-by-byte by {@link #nextKeyValue()}. */
	protected ByteBuffer buffer;
	// TODO 
	// need to do something sensible here.
	// Hadoop splits the data during submission so knows the number of rows upfront
	// since it can ask for the number of splits
	// We on the other hand chunk into blocks for mappers, and then split into records in 
	// parallel before we ever know how many rows are in a chunk.  An ordered row number is therefore
	// not possible to deduce
	// NOTE(review): being static, this counter is shared (and unsynchronized) across
	// ALL reader instances, so concurrent readers race on it and keys are neither
	// unique per reader nor strictly ordered — confirm whether callers rely on keys.
	protected static long rowNumber=0;
	/** Text of the most recently read line; only valid after nextKeyValue() returned true. */
	protected Text rowText;
	/** Record delimiter; a terminator at end-of-input does not start an extra empty record. */
	public static final char lineTerminator = '\n';


	/**
	 * Returns the (approximate, see rowNumber note above) row number of the
	 * current record as a fresh writable.
	 */
	@Override
	public LongWritable getCurrentKey() {
		return new LongWritable(rowNumber);
	}

	/**
	 * Returns the text of the current record, excluding its line terminator.
	 * Undefined (stale or null) before the first successful nextKeyValue().
	 */
	@Override
	public Text getCurrentValue() {
		return rowText;
	}

	/**
	 * Binds this reader to a chunk of input.
	 *
	 * @param input buffer positioned at the start of the chunk; a read-only
	 *              duplicate is taken so the caller's buffer is never mutated
	 */
	@Override
	public void initialize(ByteBuffer input) {
		// make a read only copy just in case
		buffer = input.asReadOnlyBuffer();
	}

	/**
	 * Advances to the next line in the buffer.
	 *
	 * <p>Fix over the previous version: when the chunk's final byte was the line
	 * terminator, the terminator itself used to be appended into the record text
	 * (e.g. "a\n" produced the row "a\n" instead of "a"). The terminator is now
	 * consumed but never included in the record.
	 *
	 * @return true if a record was read (key/value updated), false at end of input
	 */
	@Override
	public boolean nextKeyValue() {
		// End of input: leave key/value untouched and signal no more records.
		if (!buffer.hasRemaining()) {
			return false;
		}
		StringBuilder sb = new StringBuilder();
		while (buffer.hasRemaining()) {
			// NOTE(review): a raw byte->char cast is only correct for single-byte
			// encodings (ASCII/Latin-1); multi-byte UTF-8 input would be garbled
			// here — confirm the expected input encoding.
			char value = (char) buffer.get();
			if (value == lineTerminator) {
				// Terminator is consumed but is not part of the record.
				break;
			}
			sb.append(value);
		}
		rowNumber++;
		rowText = new Text(sb.toString());
		return true;
	}
}
