/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Job;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.MapContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;


/**
 * 
 * FixedLengthRecordReader is returned by FixedLengthInputFormat. This reader
 * uses the record length property set within the FixedLengthInputFormat to 
 * read one record at a time from the given InputSplit. This record reader
 * does not support compressed files.<BR><BR>
 * 
 * Each call to nextKeyValue() updates the LongWritable KEY and Text VALUE.<BR><BR>
 * 
 * KEY = byte position in the file the record started at<BR>
 * VALUE = the record itself (Text)
 * 
 * @see FixedLengthInputFormat
 *
 */
public class FixedLengthRecordReader extends RecordReader<LongWritable, Text> {

  // reference to the logger
  protected static final Log LOG =
      LogFactory.getLog(FixedLengthRecordReader.class);

  // byte offset in the file where our split begins
  protected long splitStart;

  // byte offset in the file where our split ends (exclusive)
  protected long splitEnd;

  // our current byte position within the file
  protected long currentPosition;

  // the length of a record
  // NOTE(review): never assigned in this class; nextKeyValue() reads the
  // entire remainder of the split as one record instead. Presumably
  // FixedLengthInputFormat sizes splits to whole records -- confirm there.
  protected int recordLength;

  // the extra length so we don't lose records
  protected int extraLength;

  // stream over the split's file; null until initialize(), null after close()
  protected FSDataInputStream fileInputStream;

  // the input byte counter
  // NOTE(review): never populated in this class (counter wiring was removed)
  protected Counter inputByteCounter;

  // the FileSplit this reader serves
  protected FileSplit fileSplit;

  // our record key (byte position the record started at); lazily created
  protected LongWritable recordKey = null;

  // the record value; lazily created, reused across records
  protected Text recordValue = null;

  // total length of the underlying file in bytes
  protected long lengthOfFile;

  /**
   * Closes the underlying input stream, if one is open. Safe to call
   * more than once.
   */
  @Override
  public void close() throws IOException {
    if (fileInputStream != null) {
      fileInputStream.close();
      fileInputStream = null; // guard against double-close
    }
  }

  /**
   * @return the current key: the byte position in the file at which the
   *         current record started, or null before the first nextKeyValue()
   */
  @Override
  public LongWritable getCurrentKey() throws IOException,
      InterruptedException {
    return recordKey;
  }

  /**
   * @return the current value: the record bytes as Text, or null before the
   *         first nextKeyValue()
   */
  @Override
  public Text getCurrentValue() throws IOException, InterruptedException {
    return recordValue;
  }

  /**
   * Reports progress as the fraction of the split consumed so far,
   * clamped to 1.0. A zero-length split reports 0.
   */
  @Override
  public float getProgress() throws IOException, InterruptedException {
    if (splitStart == splitEnd) {
      return 0.0f; // avoid divide-by-zero on an empty split
    }
    float consumed = (currentPosition - splitStart)
        / (float) (splitEnd - splitStart);
    return Math.min(1.0f, consumed);
  }

  /**
   * Opens the split's file, rejects compressed input, seeks to the start
   * of the split, and records the split boundaries and file length.
   *
   * @param inputSplit the FileSplit to read (must be a FileSplit)
   * @param context    task context supplying the job Configuration
   * @throws IOException if the file is compressed or cannot be opened
   */
  @Override
  public void initialize(InputSplit inputSplit, TaskAttemptContext context)
      throws IOException, InterruptedException {

    // the file input fileSplit
    this.fileSplit = (FileSplit) inputSplit;

    // byte boundaries of this split within the file
    splitStart = fileSplit.getStart();
    splitEnd = splitStart + fileSplit.getLength();

    // log some debug info
    LOG.info("FixedLengthRecordReader: SPLIT START=" + splitStart
        + " SPLIT END=" + splitEnd
        + " SPLIT LENGTH=" + fileSplit.getLength());

    // the actual file we will be reading from
    Path file = fileSplit.getPath();

    // job configuration
    Configuration job = context.getConfiguration();

    // this reader cannot decode compressed input; fail fast if a codec
    // matches the file
    CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
    if (codec != null) {
      throw new IOException("FixedLengthRecordReader does not support reading " +
              "compressed files");
    }

    // extra bytes appended to the split so trailing records are not lost
    this.extraLength = FixedLengthInputFormat.getExtraLength(job);

    // open the file and position the stream at the start of our split
    final FileSystem fs = file.getFileSystem(job);
    fileInputStream = fs.open(file);
    fileInputStream.seek(splitStart);
    this.currentPosition = splitStart;

    lengthOfFile = fs.getFileStatus(file).getLen();
  }

  /**
   * Reads the next record. The KEY is set to the byte position the record
   * starts at; the VALUE receives the record bytes. The entire remainder
   * of the split is consumed as a single record.
   *
   * @return true if a record was read, false when the split is exhausted
   * @throws IOException on an unexpected EOF inside the split
   */
  @Override
  public boolean nextKeyValue() throws IOException, InterruptedException {
    if (recordKey == null) {
      recordKey = new LongWritable();
    }

    // the Key is always the position the record starts at
    recordKey.set(currentPosition);

    // the recordValue to place the record text in
    if (recordValue == null) {
      recordValue = new Text();
    } else {
      recordValue.clear();
    }

    // split exhausted: nothing more to read
    if (currentPosition >= splitEnd) {
      return false;
    }

    // read everything remaining in the split as one record
    int totalToRead = (int) (splitEnd - currentPosition);
    byte[] buffer = new byte[totalToRead];

    while (totalToRead > 0) {
      int read = this.fileInputStream.read(buffer, 0, totalToRead);

      /* EOF here is an error: each split calculated by
       * FixedLengthInputFormat is expected to lie entirely within the
       * file, so hitting EOF mid-split means we read only part of the
       * expected bytes.
       */
      if (read == -1) {
        throw new IOException("FixedLengthRecordReader, " +
                " unexpectedly encountered an EOF when attempting" +
                " to read in an entire record from the current split");
      }

      // read is never zero here: InputStream.read only returns 0 when
      // asked for 0 bytes, and totalToRead > 0 in this loop

      // append what we got and shrink the remaining count
      recordValue.append(buffer, 0, read);
      totalToRead -= read;
    }

    // advance to the end of the split and resynchronize the stream there
    currentPosition = splitEnd;
    this.fileInputStream.seek(currentPosition);

    return true;
  }

}
