/**
 * 
 */
package mapreduce4j;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

/**
 * An input format for reading text files delimited by \n
 * @author tim
 */
public class TextInputFormat extends InputFormat {
	// bytes to read at a time from the input
	public static final int READ_AHEAD_BYTES = 256; 
	
	/**
	 * Scans forward from {@code startByte} counting bytes until the first
	 * newline ('\n') is found or end-of-file is reached.
	 *
	 * @param fcin To read from
	 * @param startByte To start at
	 * @return The number of bytes read up to and including the first '\n',
	 *         or the number of bytes remaining to end-of-file if no newline
	 *         is encountered
	 * @throws IOException On file reading error
	 */
	@Override
	public long bytesUntilRecord(FileChannel fcin, long startByte) throws IOException {
		ByteBuffer bb = ByteBuffer.allocate(READ_AHEAD_BYTES);
		
		long offset = 0;
		// loop in case READ_AHEAD_BYTES is not enough to reach the newline
		while (true) {
			// stop when the NEXT read position is past end-of-file; the
			// original only checked startByte and ignored offset, so the
			// loop spun forever once the scan passed EOF without a '\n'
			if (startByte + offset >= fcin.size()) {
				break;
			}
			
			// clear() (not rewind()) resets both position and limit so the
			// buffer is fully reusable for the next read
			bb.clear();
			int bytesRead = fcin.read(bb, startByte + offset);
			if (bytesRead <= 0) {
				// defensive: end of file (or empty read) before a newline
				break;
			}
			// only inspect the bytes actually read; iterating to bb.limit()
			// (== capacity) would scan stale buffer content on a short read
			for (int i = 0; i < bytesRead; i++) {
				offset++;
				// note we read bytes, not chars
				if ((char) bb.get(i) == '\n') {
					return offset;
				}
			}
		}
		
		return offset;
	}

	/**
	 * Creates a reader that produces (byte offset, line text) records
	 * from the given buffer.
	 *
	 * @param byteBuffer The split contents to read records from
	 * @return An initialized {@link LineRecordReader} over the buffer
	 */
	@Override
	public RecordReader<LongWritable, Text> createRecordReader(ByteBuffer byteBuffer) {
		LineRecordReader reader = new LineRecordReader();
		reader.initialize(byteBuffer);
		return reader;
	}
}
