/*************************************************************************************
* 	 Copyright (C) 2010 by Information Systems Group, Saarland University  			*
*    http://infosys.cs.uni-saarland.de												*
* 	 																				*
* 	 This file is part of Hadoop++.												 	*
*																					*
*    Hadoop++ is free software: you can redistribute it and/or modify				*
*    it under the terms of the GNU Lesser General Public License as published by	*
*    the Free Software Foundation, either version 3 of the License, or				*
*    (at your option) any later version.											*
*																					*
*    Hadoop++ is distributed in the hope that it will be useful,					*
*    but WITHOUT ANY WARRANTY; without even the implied warranty of					*
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the					*
*    GNU Lesser General Public License for more details.							*
*																					*
*    You should have received a copy of the GNU Lesser General Public License		*
*    along with Hadoop++.  If not, see <http://www.gnu.org/licenses/>.				*
*************************************************************************************/
package unisb.cs.core.join.cogroup.rearrange;

import java.io.IOException;
import java.util.ArrayList;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

import unisb.cs.core.binary.reader.BinaryReaderInputFormat;
import unisb.cs.core.join.cogroup.structure.GroupHeader;
import unisb.cs.data.types.DataTypes;

/**
 * Input format for rearranging a co-group data split. Splits are aligned to
 * integral co-group boundaries so that all tuples from the two relations
 * sharing the same group key end up in the same split.
 */
public class CogroupRearrangerInputFormat extends BinaryReaderInputFormat {
	/** Allow a split to exceed the target size by this factor (10% slop). */
	private static final double SPLIT_SLOP = 1.1;

	/** Lower bound on the split size; combined with "mapred.min.split.size". */
	private long minSplitSize = 1;

	/** Job configuration key holding the group attribute's data type id. */
	public static String GRP_ATTR_TYPE = "grpattrtype";

	/** Data type id of the group attribute, parsed from the job configuration. */
	private int grpAttrType;

	/**
	 * Creates the record reader for a rearranged co-group split.
	 *
	 * @param split the file split to read (must be a {@link FileSplit})
	 * @param conf the job configuration
	 * @param r the progress reporter (unused)
	 * @return a record reader over the given split
	 * @throws IOException if the reader cannot be created
	 */
	public RecordReader<Text, Text> getRecordReader(InputSplit split, JobConf conf, Reporter r) throws IOException {
		return new CogroupRearrangerRecordReader((FileSplit) split, conf);
	}

	/**
	 * Creates splits at integral co-group boundaries, i.e. all tuples from the
	 * two relations having the same group key are placed in the same split.
	 *
	 * Splits files returned by {@link #listStatus(JobConf)} when they're too big.
	 *
	 * @param job the job configuration; must contain {@link #GRP_ATTR_TYPE}
	 * @param numSplits the requested number of splits (a hint)
	 * @return the generated splits
	 * @throws IOException if an input path is a directory, the group attribute
	 *         type is missing from the configuration, or a file cannot be read
	 */
	public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
		FileStatus[] files = listStatus(job);

		// Fail fast with a clear message when the required setting is absent;
		// Integer.parseInt(null) would otherwise throw an obscure
		// NumberFormatException.
		String attrType = job.get(GRP_ATTR_TYPE);
		if (attrType == null)
			throw new IOException("Missing required job setting: " + GRP_ATTR_TYPE);
		grpAttrType = Integer.parseInt(attrType);

		long totalSize = 0; // compute total size
		for (FileStatus file : files) { // check we have valid files
			if (file.isDir())
				throw new IOException("Not a file: " + file.getPath());
			totalSize += file.getLen();
		}

		long goalSize = totalSize / (numSplits == 0 ? 1 : numSplits);
		long minSize = Math.max(job.getLong("mapred.min.split.size", 1), minSplitSize);

		// generate splits
		int splitCount = 1;
		ArrayList<FileSplit> splits = new ArrayList<FileSplit>(numSplits);
		for (FileStatus file : files) {
			Path path = file.getPath();
			FileSystem fs = path.getFileSystem(job);
			long length = file.getLen();
			BlockLocation[] blkLocations = fs.getFileBlockLocations(file, 0, length);
			if ((length != 0) && isSplitable(fs, path)) {
				long blockSize = file.getBlockSize();
				long splitSize = computeSplitSize(goalSize, minSize, blockSize);

				long bytesRemaining = length;
				FSDataInputStream in = fs.open(path);
				try {
					while (((double) bytesRemaining) / splitSize > SPLIT_SLOP) {
						int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);

						// trim the split size to integral co-group boundaries
						long trimmedSize = trimSplitSize(in, length - bytesRemaining, bytesRemaining, splitSize);

						splits.add(new FileSplit(path, length - bytesRemaining, trimmedSize, blkLocations[blkIndex].getHosts()));
						job.set(splitId(path.getName(), length - bytesRemaining), "" + splitCount);
						splitCount++;

						bytesRemaining -= trimmedSize;
					}

					if (bytesRemaining != 0) {
						// NOTE(review): the return value is deliberately ignored --
						// the whole remainder becomes one split; the call only walks
						// the group headers (acting as a sanity read of the tail).
						trimSplitSize(in, length - bytesRemaining, bytesRemaining, bytesRemaining);
						splits.add(new FileSplit(path, length - bytesRemaining, bytesRemaining, blkLocations[blkLocations.length - 1].getHosts()));
						job.set(splitId(path.getName(), length - bytesRemaining), "" + splitCount);
						splitCount++;
					}
				} finally {
					// Fix: the stream was previously leaked. closeStream() swallows
					// close() errors so an exception already in flight is not masked.
					IOUtils.closeStream(in);
				}
			} else if (length != 0) {
				// Unsplittable non-empty file: one split covering the whole file.
				splits.add(new FileSplit(path, 0, length, blkLocations[0].getHosts()));
				job.set(splitId(path.getName(), 0), "" + splitCount);
				splitCount++;
			} else {
				// Create empty hosts array for zero length files
				splits.add(new FileSplit(path, 0, length, new String[0]));
				job.set(splitId(path.getName(), 0), "" + splitCount);
				splitCount++;
			}
		}
		return splits.toArray(new FileSplit[splits.size()]);
	}

	/**
	 * Trims the data split to the previous integral co-group boundary.
	 * Starting from {@code start} we read group headers until we have packed
	 * as many whole co-groups as fit within {@code splitSize}.
	 *
	 * @param in the file input stream (this method seeks to {@code start})
	 * @param start the starting offset of the split
	 * @param bytesRemaining the number of bytes remaining to be split
	 * @param splitSize the target split size which should not be exceeded
	 * @return the trimmed split size in bytes (always covers at least one group)
	 * @throws IOException if reading a group header fails
	 */
	private long trimSplitSize(FSDataInputStream in, long start, long bytesRemaining, long splitSize) throws IOException {
		int sizeHeaderLen = 1; // one leading byte encodes the header length
		in.seek(start);

		// Fix: use a long accumulator -- group sizes summed over a large split
		// could overflow an int (the method already returns long).
		long offset = 0;
		GroupHeader prevG = null;
		GroupHeader g = getGroup(in);
		offset += (g.getGroupSize() + g.getHeaderSize() + sizeHeaderLen);

		// try to pack in as many co-groups within the split as possible
		while (offset < bytesRemaining && offset < splitSize) {
			prevG = g;
			g = getGroup(in); // fix: removed a catch block that only rethrew
			offset += (g.getGroupSize() + g.getHeaderSize() + sizeHeaderLen);
		}

		// If we overshot, drop the last group again. The prevG != null guard
		// fixes two former defects when the very first group alone exceeded
		// splitSize: a NullPointerException in the compare below, and a
		// zero-byte return value that would stall getSplits() in an infinite
		// loop -- a split must always keep at least one group.
		if (offset > splitSize && prevG != null) {
			offset -= (g.getGroupSize() + g.getHeaderSize() + sizeHeaderLen);
			// If the dropped group shares its key with the previous one, they
			// belong to the same co-group; drop the previous group too so the
			// whole co-group lands intact in the next split.
			if (DataTypes.compare(g.getGroupValue(), prevG.getGroupValue(), grpAttrType) == 0) {
				offset -= (prevG.getGroupSize() + prevG.getHeaderSize() + sizeHeaderLen);
			}
		}

		return offset;
	}

	/**
	 * Reads one group header from the input stream and skips over the group's
	 * payload, leaving the stream positioned at the start of the next group.
	 *
	 * @param in the split data input stream, positioned at a group boundary
	 * @return the parsed group header
	 * @throws IOException if the header or payload cannot be read in full
	 */
	protected GroupHeader getGroup(FSDataInputStream in) throws IOException {
		int sizeHeaderLen = 1; // one leading byte encodes the header length

		byte[] sizeHeader = new byte[sizeHeaderLen];
		IOUtils.readFully(in, sizeHeader, 0, sizeHeaderLen);

		int headerSize = GroupHeader.getSize(sizeHeader);
		byte[] header = new byte[headerSize];
		IOUtils.readFully(in, header, 0, headerSize);

		GroupHeader gh = new GroupHeader(header);
		IOUtils.skipFully(in, gh.getGroupSize());

		return gh;
	}
}
