package demo.spark.stream.format

import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.hadoop.mapreduce.{InputSplit, RecordReader, TaskAttemptContext}

/**
 * Hadoop input format producing `(PathAndOffsetWritable, LengthUnfixedBytesWritable)`
 * key/value pairs by delegating record reading to `FileOffSetBytesRecordReader`.
 */
class FileOffsetBytesFormat extends FileInputFormat[PathAndOffsetWritable, LengthUnfixedBytesWritable] {

  /**
   * Creates a fresh record reader for the given split.
   *
   * Note: the `split` and `context` arguments are not passed to the reader's
   * constructor here; presumably the reader receives them later via
   * `RecordReader.initialize` — confirm against `FileOffSetBytesRecordReader`.
   * NOTE(review): the reader's `OffSet` casing differs from this class's
   * `Offset` — verify the declared name and consider aligning it.
   */
  override def createRecordReader(
      split: InputSplit,
      context: TaskAttemptContext
  ): RecordReader[PathAndOffsetWritable, LengthUnfixedBytesWritable] =
    new FileOffSetBytesRecordReader
}

/**
 * Companion holding configuration keys for [[FileOffsetBytesFormat]].
 */
object FileOffsetBytesFormat {

  /** Hadoop configuration key for the maximum segment length used by this format. */
  val MAX_SEGMENT_LENGTH_PROPERTY: String =
    "demo.spark.stream.format.FileOffsetBytesFormat.maxSegmentLength"
}
