/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;

/**
 * FixedLengthInputFormat is an input format which can be used
 * for input files which contain fixed length records with NO
 * delimiters and NO carriage returns (CR, LF, CRLF) etc. Such
 * files typically only have one gigantic line and each "record"
 * is of a fixed length, and padded with spaces if the record's actual
 * value is shorter than the fixed length.<BR><BR>
 * 
 * Users must configure the record length property before submitting
 * any jobs which use FixedLengthInputFormat.<BR><BR>
 * 
 * FixedLengthInputFormat.setSplitLength(myJob,[mySplitLength]);<BR><BR>
 * 
 * This input format overrides <code>computeSplitSize()</code> in order to ensure
 * that InputSplits do not contain any partial records since with fixed records
 * there is no way to determine where a record begins if that were to occur.
 * Each InputSplit passed to the FixedLengthRecordReader will start at the beginning
 * of a record, and the last byte in the InputSplit will be the last byte of a record.
 * The override of <code>computeSplitSize()</code> delegates to FileInputFormat's
 * compute method, and then adjusts the returned split size by doing the following:
 * <code>(Math.floor(fileInputFormatsComputedSplitSize / fixedRecordLength) * fixedRecordLength)</code>
 *
 * <BR><BR>
 * This InputFormat returns a FixedLengthRecordReader. <BR><BR>
 * 
 * Compressed files are not supported; note, however, that the codec check
 * that would reject them is currently disabled in <code>isSplitable()</code>.
 *
 * @see    FixedLengthRecordReader
 *
 */
public class FixedLengthInputFormat extends FileInputFormat<LongWritable, Text> {

  /**
   * Configuration property holding the fixed split length, in bytes.
   * When using FixedLengthInputFormat you MUST set this property in your
   * job configuration before submitting the job.
   * <BR><BR>
   * 
   * i.e. myJobConf.setInt("mapreduce.input.fixedlengthinputformat.split.length",[mySplitLength]);
   * <BR><BR>
   * OR<BR><BR>
   * FixedLengthInputFormat.setSplitLength(myJob,[mySplitLength]);
   * 
   */
  public static final String FIXED_SPLIT_LENGTH = 
    "mapreduce.input.fixedlengthinputformat.split.length"; 

  /**
   * Configuration property holding the extra length, in bytes. Each split
   * (except the last) is extended by (extraLength - 1) bytes past its
   * nominal end, so consecutive splits overlap and a record straddling a
   * split boundary can still be read in full from the earlier split.
   */
  public static final String FIXED_EXTRA_LENGTH = 
    "mapreduce.input.fixedlengthinputformat.extra.length"; 

  // our logger reference
  private static final Log LOG = 
    LogFactory.getLog(FixedLengthInputFormat.class);

  // Values cached from the job configuration by isSplitable(); -1 marks
  // "not yet initialized". getSplits() invokes isSplitable() for each
  // non-empty file before computeSplitSize() is consulted.
  private int splitLength = -1;
  private int extraLength = -1;

  /**
   * Set the fixed split length for a job.
   * @param job the job to modify
   * @param splitLength the split length, in bytes (must be > 0)
   */
  public static void setSplitLength(Job job, int splitLength) {
    job.getConfiguration().setInt(FIXED_SPLIT_LENGTH, splitLength);
  }

  /**
   * Set the extra (overlap) length for a job.
   * @param job the job to modify
   * @param extraLength the extra length, in bytes (must be > 0)
   */
  public static void setExtraLength(Job job, int extraLength) {
    job.getConfiguration().setInt(FIXED_EXTRA_LENGTH, extraLength);
  }

  /**
   * Get the fixed split length.
   * @param job the job
   * @return the configured split length, or 0 if unset
   */
  public static int getSplitLength(JobContext job) {
    return job.getConfiguration().getInt(FIXED_SPLIT_LENGTH, 0);
  }

  /**
   * Get the extra (overlap) length.
   * @param job the job
   * @return the configured extra length, or 0 if unset
   */
  public static int getExtraLength(JobContext job) {
    return job.getConfiguration().getInt(FIXED_EXTRA_LENGTH, 0);
  }

  /**
   * Return the int value from the given Configuration found
   * by the FIXED_SPLIT_LENGTH property.
   * 
   * @param config the configuration to read
   * @return    int split length value
   * @throws IOException if the split length found is 0 (non-existant, not set etc)
   */
  public static int getSplitLength(Configuration config) throws IOException {
    int splitLength = 
      config.getInt(FixedLengthInputFormat.FIXED_SPLIT_LENGTH, 0); 

    // this would be an error
    if (splitLength == 0) {
      throw new IOException("FixedLengthInputFormat requires the Configuration " +
              "property:" + FIXED_SPLIT_LENGTH + " to" +
              " be set to something > 0. Currently the value is 0 (zero)");
    }

    return splitLength;
  }

  /**
   * Return the int value from the given Configuration found
   * by the FIXED_EXTRA_LENGTH property.
   * 
   * @param config the configuration to read
   * @return    int extra length value
   * @throws IOException if the extra length found is 0 (non-existant, not set etc)
   */
  public static int getExtraLength(Configuration config) throws IOException {
    int extraLength = 
      config.getInt(FixedLengthInputFormat.FIXED_EXTRA_LENGTH, 0); 

    // this would be an error
    if (extraLength == 0) {
      throw new IOException("FixedLengthInputFormat requires the Configuration " +
              "property:" + FIXED_EXTRA_LENGTH + " to" +
              " be set to something > 0. Currently the value is 0 (zero)");
    }

    return extraLength;
  }

  /**
   * Returns the configured fixed split length, ignoring the
   * blockSize/minSize/maxSize hints entirely, so that splits are cut at
   * fixed byte offsets and never contain a partial record.
   * <BR><BR>
   * NOTE: relies on isSplitable() having cached the configured value
   * first; getSplits() guarantees this ordering.
   * 
   * @inheritDoc
   */
  @Override
  protected long computeSplitSize(long blockSize, long minSize, long maxSize) {
    return this.splitLength;
  }

  /**
   * Returns a FixedLengthRecordReader instance
   * 
   * @inheritDoc
   */
  @Override
  public RecordReader<LongWritable, Text> createRecordReader(InputSplit split,
      TaskAttemptContext context) throws IOException, InterruptedException {
    LOG.info("new FixedLengthRecordReader");
    return new FixedLengthRecordReader();
  }

  /**
   * Always returns true, and as a side effect caches the configured split
   * and extra lengths for use by computeSplitSize()/getSplits().
   * <BR><BR>
   * NOTE(review): the compression-codec check that previously made
   * compressed files non-splittable is disabled; compressed input would
   * be split incorrectly.
   * 
   * @inheritDoc
   * @throws RuntimeException if either required property is unset/zero
   */
  @Override
  protected boolean isSplitable(JobContext context, Path file) {
    try {
      this.splitLength = getSplitLength(context.getConfiguration());
      this.extraLength = getExtraLength(context.getConfiguration());
    } catch (Exception e) {
      // Preserve the cause and name both properties that must be set.
      throw new RuntimeException("FixedLengthInputFormat requires the Configuration " +
              "properties:" + FIXED_SPLIT_LENGTH + " and " + FIXED_EXTRA_LENGTH +
              " to be set to something > 0.", e);
    }
    return true; 
  } 

  /**
   * Generates fixed-size, overlapping splits for each non-empty input
   * file: every split except the last has length
   * (splitLength + extraLength - 1) and starts splitLength bytes after
   * its predecessor; the final split covers whatever bytes remain.
   * <BR><BR>
   * NOTE(review): zero-length files (and files for which isSplitable()
   * returns false) produce no splits at all here, unlike the default
   * FileInputFormat behavior.
   * 
   * @param job the job context
   * @return the list of FileSplits for the job's input files
   * @throws IOException on filesystem errors while listing/locating blocks
   */
  @Override
  public List<InputSplit> getSplits(JobContext job 
      ) throws IOException {
    List<InputSplit> splits = new ArrayList<InputSplit>();
    for (FileStatus file: listStatus(job)) {
      Path path = file.getPath();
      FileSystem fs = path.getFileSystem(job.getConfiguration());
      long length = file.getLen();
      BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
      if ((length != 0) && isSplitable(job, path)) { 
        // Arguments are ignored; the configured fixed length is used.
        long splitSize = computeSplitSize(0, 0, 0);

        long bytesRemaining = length;
        // Emit a full-size (overlapped) split only while at least
        // splitSize + extraLength - 1 bytes remain.
        while (((double) bytesRemaining)-splitSize-extraLength+1 > 0) {
          int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
          splits.add(new FileSplit(path, length-bytesRemaining, splitSize + extraLength - 1,
                blkLocations[blkIndex].getHosts()));
          bytesRemaining -= splitSize;
        }   

        if (bytesRemaining != 0) {
          // Last split: locate the block actually containing its start
          // offset instead of always using the file's last block, so the
          // locality hint is correct when the tail spans earlier blocks.
          int blkIndex = getBlockIndex(blkLocations, length-bytesRemaining);
          splits.add(new FileSplit(path, length-bytesRemaining, bytesRemaining, 
                blkLocations[blkIndex].getHosts()));
        }   
      }
    }  
    LOG.debug("Total # of splits: " + splits.size());
    return splits;
  }
}
