/*************************************************************************************
* 	 Copyright (C) 2010 by Information Systems Group, Saarland University  			*
*    http://infosys.cs.uni-saarland.de												*
* 	 																				*
* 	 This file is part of Hadoop++.												 	*
*																					*
*    Hadoop++ is free software: you can redistribute it and/or modify				*
*    it under the terms of the GNU Lesser General Public License as published by	*
*    the Free Software Foundation, either version 3 of the License, or				*
*    (at your option) any later version.											*
*																					*
*    Hadoop++ is distributed in the hope that it will be useful,					*
*    but WITHOUT ANY WARRANTY; without even the implied warranty of					*
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the					*
*    GNU Lesser General Public License for more details.							*
*																					*
*    You should have received a copy of the GNU Lesser General Public License		*
*    along with Hadoop++.  If not, see <http://www.gnu.org/licenses/>.				*
*************************************************************************************/
package unisb.cs.core.binary.converter;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

/**
 * This class provides the input format for the text file to be converted to binary.
 * The current implementation supports only a single relation. If there are
 * multiple files in the input path, we assume all of them belong to the same relation.
 * 
 * Specifically, this class does two things:
 *  1. Create the data splits to be processed by the mappers. Each split should contain 
 *     integral number of tuples, i.e. we should split at tuple boundaries. 
 *  2. Provide the record reader (itemize) which will shred the data split into tuples. 
 * 
 */
public class BinaryConverterInputFormat extends FileInputFormat<Text, Text> {

	/** Allowed overshoot: a split may exceed the target size by up to 10%. */
	private static final double SPLIT_SLOP = 1.1; // 10% slop

	/** Lower bound on the split size (mirrors the private field in FileInputFormat). */
	private long minSplitSize = 1;

	/** Job-conf key whose value is the maximum tuple size (in bytes) of any tuple in the data. */
	public static String MAX_TUPLE_SIZE = "maxTupleSize";


	/**
	 * Create a split identifier using the data filename and the
	 * start offset of the split within that file.
	 * @param filename The name of the file containing this split
	 * @param offset The start offset of the split within this file
	 * @return The split identifier
	 */
	public static String splitId(String filename, long offset) {
		return ("SPLIT_ID_" + filename + "_" + offset);
	}

	/**
	 * Get the record reader (itemize) that shreds a data split into tuples.
	 */
	@Override
	public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf conf, Reporter r) throws IOException {
		return new BinaryConverterRecordReader(conf, (FileSplit) split);
	}

	/**
	 * Splits files returned by {@link #listStatus(JobConf)} when they're too big.
	 * Overrides {@link FileInputFormat#getSplits(JobConf, int)}.
	 *
	 * The default getSplits() splits data only by data size. Here each split is
	 * additionally trimmed so that it ends at a tuple (record) boundary; see
	 * {@link #trimSplitSize(FSDataInputStream, long, long, int)}.
	 *
	 * As a side effect, a sequential split number is stored in the job
	 * configuration under the key produced by {@link #splitId(String, long)}
	 * for every split created.
	 */
	@Override
	public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
		FileStatus[] files = listStatus(job);

		long totalSize = 0; // compute total size
		for (FileStatus file : files) { // check we have valid files
			if (file.isDir()) {
				throw new IOException("Not a file: " + file.getPath());
			}
			totalSize += file.getLen();
		}

		long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
		long minSize = Math.max(job.getLong("mapred.min.split.size", 1), minSplitSize);

		// generate splits
		int splitCount = 1;
		ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
		for (FileStatus file : files) {
			Path path = file.getPath();

			FileSystem fs = path.getFileSystem(job);
			long length = file.getLen();
			BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
			if ((length != 0) && isSplitable(fs, path)) {
				long blockSize = file.getBlockSize();
				long splitSize = computeSplitSize(goalSize, minSize, blockSize);

				// Parse the maximum tuple size once per file (failing fast with a
				// clear message if the key is unset) instead of re-parsing it on
				// every loop iteration — previously an unset key surfaced as an
				// uninformative NullPointerException inside the while loop.
				String maxTupleSizeValue = job.get(MAX_TUPLE_SIZE);
				if (maxTupleSizeValue == null) {
					throw new IOException("Missing job configuration entry: " + MAX_TUPLE_SIZE);
				}
				int maxTupleSize = Integer.parseInt(maxTupleSizeValue);

				long bytesRemaining = length;
				FSDataInputStream in = fs.open(path);
				try {
					while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
						int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);

						// Trim the split size in order to respect record boundaries
						long trimmedSize = trimSplitSize(in, length - bytesRemaining, splitSize, maxTupleSize);

						splits.add(new FileSplit(path, length - bytesRemaining, trimmedSize, blkLocations[blkIndex].getHosts()));
						job.set(splitId(path.getName(), length - bytesRemaining), "" + splitCount);
						splitCount++;

						bytesRemaining -= trimmedSize;
					}
				} finally {
					in.close(); // BUGFIX: the stream was previously never closed (one leak per file)
				}

				if (bytesRemaining != 0) {
					// no need to trim the last remaining split (assuming that in all the file has integral records!)
					splits.add(new FileSplit(path, length - bytesRemaining, bytesRemaining, blkLocations[blkLocations.length - 1].getHosts()));
					job.set(splitId(path.getName(), length - bytesRemaining), "" + splitCount);
					splitCount++;
				}
			} else if (length != 0) {
				// Non-splitable file: a single split covering the whole file.
				splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts()));
				job.set(splitId(path.getName(), 0), "" + splitCount);
				splitCount++;
			} else {
				// Create empty hosts array for zero length files
				splits.add(new FileSplit(path, 0, length, new String[0]));
				job.set(splitId(path.getName(), 0), "" + splitCount);
				splitCount++;
			}
		}
		System.out.println("Total # of splits: " + splits.size());
		return splits.toArray(new FileSplit[splits.size()]);
	}

	/**
	 * Trim the split size back to the previous record boundary.
	 *
	 * Searches for the record separator (newline) within the last
	 * {@code maxTupleSize} bytes of the proposed split; the offset just after
	 * the first separator found in that window becomes the split end, so the
	 * split contains an integral number of tuples. If no separator occurs in
	 * the window, the size-based boundary is kept unchanged.
	 *
	 * @param in Split data input stream
	 * @param start Starting offset of the data split
	 * @param splitSize The computed split size
	 * @param maxTupleSize The maximum size (considering VARCHARs) of any tuple in the dataset
	 * @return The trimmed split size, aligning the split at record boundaries
	 * @throws IOException if seeking or reading the stream fails
	 */
	private long trimSplitSize(FSDataInputStream in, long start, long splitSize, int maxTupleSize) throws IOException {
		// Clamp the search window so we never seek before the start of the
		// split (possible when maxTupleSize > splitSize).
		int window = (int) Math.min((long) maxTupleSize, splitSize);

		// seek and read the last 'window' bytes of the proposed split
		in.seek(start + splitSize - window);
		byte[] tail = new byte[window];
		IOUtils.readFully(in, tail, 0, window);

		// find record separator (newline by default) within the read bytes;
		// BUGFIX: compare the byte directly instead of allocating a new String
		// per byte using the platform default charset
		for (int i = 0; i < window; i++) {
			if (tail[i] == '\n') {
				return (splitSize - window + i + 1);
			}
		}

		// no separator found: return the untrimmed size
		return splitSize;
	}
}
