/**
 * @author 
 * 
 */
package com.ccb.textinputformat;

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

import com.ccb.recordreader.CustomRecordReader;
import com.google.common.base.Charsets;

/**
 * An {@link InputFormat} for plain text files. Files are broken into lines. Either linefeed or carriage-return are used to signal end of line. Keys are the position in the file, and values are the line of text.
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class CustomStartEndDelimiter extends TextInputFormat {

	/** Configuration key for the record start delimiter. */
	private static final String START_DELIMITER_KEY = "textinputformat.record.startdelimiter";
	/** Configuration key for the record end delimiter. */
	private static final String END_DELIMITER_KEY = "textinputformat.record.enddelimiter";

	/**
	 * Creates a {@link CustomRecordReader} configured with the UTF-8 encoded
	 * start/end record delimiters taken from the job configuration.
	 *
	 * @param split   the input split to read (unused here; consumed by the reader's initialize)
	 * @param context task context providing the job {@link org.apache.hadoop.conf.Configuration}
	 * @return a record reader producing (file position, line) pairs
	 */
	@Override
	public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
		// Decode each delimiter independently. The previous implementation only
		// read the start delimiter when the end delimiter was present, which
		// threw an NPE when "textinputformat.record.startdelimiter" was unset
		// and silently ignored a start delimiter configured on its own.
		byte[] startRecordDelimiterBytes = toUtf8Bytes(context.getConfiguration().get(START_DELIMITER_KEY));
		byte[] endRecordDelimiterBytes = toUtf8Bytes(context.getConfiguration().get(END_DELIMITER_KEY));
		return new CustomRecordReader(startRecordDelimiterBytes, endRecordDelimiterBytes);
	}

	/** Returns the UTF-8 bytes of {@code value}, or {@code null} when {@code value} is null. */
	private static byte[] toUtf8Bytes(String value) {
		return value == null ? null : value.getBytes(StandardCharsets.UTF_8);
	}

}
