/*************************************************************************************
* 	 Copyright (C) 2010 by Information Systems Group, Saarland University  			*
*    http://infosys.cs.uni-saarland.de												*
* 	 																				*
* 	 This file is part of Hadoop++.												 	*
*																					*
*    Hadoop++ is free software: you can redistribute it and/or modify				*
*    it under the terms of the GNU Lesser General Public License as published by	*
*    the Free Software Foundation, either version 3 of the License, or				*
*    (at your option) any later version.											*
*																					*
*    Hadoop++ is distributed in the hope that it will be useful,					*
*    but WITHOUT ANY WARRANTY; without even the implied warranty of					*
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the					*
*    GNU Lesser General Public License for more details.							*
*																					*
*    You should have received a copy of the GNU Lesser General Public License		*
*    along with Hadoop++.  If not, see <http://www.gnu.org/licenses/>.				*
*************************************************************************************/
package unisb.cs.core.binary.reader;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

import unisb.cs.core.binary.utils.BinaryUtils;

/**
 * This class provides the input format for a binary encoded data file.
 * 
 * Specifically, this class provides two things:
 *  1. Splits the binary data using split footers
 *  2. Provides the record reader to shred the split into tuples.
 *
 */
public class BinaryReaderInputFormat extends FileInputFormat<Text, Text> {

	/** The size in bytes of the footer appended to each split, which stores the split's payload size as a long. */
	public final static int SPLIT_FOOTER_SIZE = 8;

	/**
	 * Create a split identifier using the data filename and the
	 * start offset of the split within that file. 
	 * @param filename The name of the file containing this split
	 * @param offset The start offset of the split within this file
	 * @return The split identifier
	 */
	public static String splitId(String filename, long offset) {
		return ("SPLIT_ID_" + filename + "_" + offset);
	}

	/**
	 * Supply the record reader for the binary reader.
	 * @throws IOException if the record reader cannot be created
	 */
	@Override
	public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
		try {
			return new BinaryReaderRecordReader((FileSplit) split, job);
		} catch (IOException e) {
			throw e;
		} catch (Exception e) {
			// Propagate the failure instead of returning null: a null reader
			// would only surface later as an obscure NullPointerException
			// inside the MapReduce framework.
			throw new IOException("Failed to create record reader for split: " + split, e);
		}
	}

	/**
	 * Splits files returned by {@link #listStatus(JobConf)} using the split
	 * footers embedded in the data files. Splits are discovered back-to-front:
	 * the footer at the end of the file gives the size of the last split,
	 * which locates the footer of the split before it, and so on.
	 *
	 * For each split a SPLIT_ID_* property carrying its sequence number is
	 * recorded in the job configuration.
	 *
	 * @param job The job configuration (also receives the split id properties)
	 * @param numSplits Hint for the number of splits; only used to presize the
	 *        result list, since the footers fully determine the boundaries
	 * @return The generated splits
	 * @throws IOException if an input is a directory or a footer is corrupt
	 */
	@Override
	public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
		FileStatus[] files = listStatus(job);

		for (FileStatus file : files) { // check we have valid files
			if (file.isDir()) {
				throw new IOException("Not a file: " + file.getPath());
			}
		}

		// generate splits
		int splitCount = 1;
		ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
		for (FileStatus file : files) {
			Path path = file.getPath();

			FileSystem fs = path.getFileSystem(job);
			long length = file.getLen();
			BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
			if ((length != 0) && isSplitable(fs, path)) {
				long bytesRemaining = length;
				FSDataInputStream in = fs.open(path);
				try {
					while (bytesRemaining > 0) {
						// get the split size from the footer at the end of the
						// current (not yet consumed) region of the file
						long splitSize = getSplitSize(in, bytesRemaining);

						// Guard against corrupt footers: a bogus size would
						// otherwise produce negative offsets or spin forever.
						if (splitSize < 0 || splitSize + SPLIT_FOOTER_SIZE > bytesRemaining) {
							throw new IOException("Corrupt split footer in " + path
									+ ": split size " + splitSize + " with "
									+ bytesRemaining + " bytes remaining");
						}

						long splitStart = bytesRemaining - splitSize - SPLIT_FOOTER_SIZE;
						int blkIndex = getBlockIndex(blkLocations, splitStart);
						splits.add(new FileSplit(path, splitStart, splitSize, blkLocations[blkIndex].getHosts()));
						job.set(splitId(path.getName(), splitStart), "" + splitCount);
						splitCount++;

						bytesRemaining -= (splitSize + SPLIT_FOOTER_SIZE);
					}
				} finally {
					in.close(); // always release the stream (was leaked before)
				}

			} else if (length != 0) {
				// Unsplittable file: a single split covering the whole file.
				splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts()));
				job.set(splitId(path.getName(), 0), "" + splitCount);
				splitCount++;

			} else {
				// Create empty hosts array for zero length files
				splits.add(new FileSplit(path, 0, length, new String[0]));
				job.set(splitId(path.getName(), 0), "" + splitCount);
				splitCount++;
			}
		}
		System.out.println("Total # of splits: " + splits.size());
		return splits.toArray(new FileSplit[splits.size()]);
	}

	/**
	 * Get the split size from the split footer that ends at the given offset.
	 * @param in Split data input stream
	 * @param offset The end offset (including the footer) of the split
	 * @return Split size in bytes (footer excluded)
	 * @throws IOException if the region is too small to contain a footer or
	 *         the footer cannot be read
	 */
	private long getSplitSize(FSDataInputStream in, long offset) throws IOException {
		if (offset < SPLIT_FOOTER_SIZE) {
			// Previously this case silently read from the current stream
			// position, which could only yield garbage; fail loudly instead.
			throw new IOException("Region of size " + offset
					+ " is too small to contain a " + SPLIT_FOOTER_SIZE + "-byte split footer");
		}
		// seek to and read the footer
		in.seek(offset - SPLIT_FOOTER_SIZE);
		byte[] sizeHeader = new byte[SPLIT_FOOTER_SIZE];
		IOUtils.readFully(in, sizeHeader, 0, sizeHeader.length);

		// decode the long value stored in the footer
		return BinaryUtils.getLong(sizeHeader, 0);
	}
}