/*************************************************************************************
* 	 Copyright (C) 2010 by Information Systems Group, Saarland University  			*
*    http://infosys.cs.uni-saarland.de												*
* 	 																				*
* 	 This file is part of Hadoop++.												 	*
*																					*
*    Hadoop++ is free software: you can redistribute it and/or modify				*
*    it under the terms of the GNU Lesser General Public License as published by	*
*    the Free Software Foundation, either version 3 of the License, or				*
*    (at your option) any later version.											*
*																					*
*    Hadoop++ is distributed in the hope that it will be useful,					*
*    but WITHOUT ANY WARRANTY; without even the implied warranty of					*
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the					*
*    GNU Lesser General Public License for more details.							*
*																					*
*    You should have received a copy of the GNU Lesser General Public License		*
*    along with Hadoop++.  If not, see <http://www.gnu.org/licenses/>.				*
*************************************************************************************/
package tpc.benchmarks.aggregateOnIndex;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.RecordReader;

import unisb.cs.core.binary.utils.BinaryUtils;
import unisb.cs.data.tables.TableObjectProxy;
/**
 * This class sets the uservisits.sourceIP and uservisits.adRevenue as
 * <key, value> pairs for the map function.
 */
public class AggregationTaskRecordReader implements RecordReader<Text, Text> {
	protected FileSplit fileSplit;
	protected FSDataInputStream in;
	/** Number of bytes in this split (the whole split is buffered in memory). */
	protected int splitSize = 0;
	/** Byte offset of the next unread record within {@link #contents}. */
	protected int offset = 0;
	/** The entire split, read eagerly in the constructor. */
	protected byte[] contents;

	/** Attribute indexes (pipe-separated in the job conf) that form the map key. */
	protected int[] keyFieldIndex;
	/** Attribute indexes (pipe-separated in the job conf) that form the map value. */
	protected int[] valueFieldIndex;
	protected String tableName;

	/**
	 * Opens the split's file, buffers the whole split into memory, and parses
	 * the key/value attribute-index lists from the job configuration.
	 *
	 * @param fileSplit the input split to read
	 * @param job job configuration; must define "key.field.index",
	 *            "value.field.index" (pipe-separated integer lists) and
	 *            "table.name"
	 * @throws IOException if the split cannot be opened or fully read
	 */
	public AggregationTaskRecordReader(FileSplit fileSplit, Configuration job)
	throws IOException {
		this.fileSplit = fileSplit;
		Path file = fileSplit.getPath();
		FileSystem fs = file.getFileSystem(job);
		this.in = fs.open(file);
		System.out.println("Split Size: " + fileSplit.getLength());
		splitSize = (int) fileSplit.getLength();
		contents = new byte[splitSize];
		IOUtils.skipFully(in, fileSplit.getStart());
		IOUtils.readFully(in, contents, offset, splitSize);

		// BUG FIX: split("|") interprets '|' as regex alternation and splits
		// between every character (first token is ""), so parsing either threw
		// NumberFormatException or produced wrong single-digit indexes.
		// Escape the pipe so we split on the literal delimiter.
		String[] keyStringFieldIndex = job.get("key.field.index").split("\\|");
		String[] valueStringFieldIndex = job.get("value.field.index").split("\\|");
		keyFieldIndex = new int[keyStringFieldIndex.length];
		valueFieldIndex = new int[valueStringFieldIndex.length];
		for (int i = 0; i < keyStringFieldIndex.length; i++)
			keyFieldIndex[i] = Integer.parseInt(keyStringFieldIndex[i]);
		for (int i = 0; i < valueStringFieldIndex.length; i++)
			valueFieldIndex[i] = Integer.parseInt(valueStringFieldIndex[i]);
		tableName = job.get("table.name");
	}

	@Override
	public void close() throws IOException {
		IOUtils.closeStream(in);
	}

	@Override
	public Text createKey() {
		return new Text();
	}

	@Override
	public Text createValue() {
		return new Text();
	}

	@Override
	public long getPos() throws IOException {
		return offset;
	}

	/**
	 * Reports progress as the fraction of buffered bytes consumed.
	 *
	 * BUG FIX: the previous implementation used in.getPos(), but the stream is
	 * fully read in the constructor, so that value never changed afterwards
	 * (and could exceed 1.0 for non-initial splits). Progress is now derived
	 * from {@link #offset}, which actually advances per record.
	 */
	@Override
	public float getProgress() throws IOException {
		if (splitSize == 0)
			return 1.0f; // empty split: avoid 0/0 -> NaN
		return Math.min(1.0f, offset / (float) splitSize);
	}

	/**
	 * Reads the next fixed-size binary record from the buffered split and sets
	 * the configured attributes (e.g. UV.sourceIP as key, UV.adRevenue as
	 * value).
	 *
	 * @param key the input key to populate on each call
	 * @param value the input value to populate on each call
	 * @return true if a new <key, value> pair was produced, false at end of split
	 * @throws IOException if a record cannot be decoded
	 */
	@Override
	public boolean next(Text key, Text value) throws IOException {
		if (offset >= splitSize)
			return false;
		try {
			TableObjectProxy proxy = new TableObjectProxy(tableName);
			int recordLen = proxy.getSize();
			// Not enough bytes left for a complete record: treat as end of split.
			if (offset > (splitSize - recordLen))
				return false;

			byte[] data = BinaryUtils.getBytes(contents, offset, recordLen);
			proxy.setRecord(data);
			proxy.getStringAttributes(value, valueFieldIndex);
			proxy.getStringAttributes(key, keyFieldIndex);
			offset += recordLen;
			return true;
		} catch (Exception e) {
			// BUG FIX: previously the exception was only printed and next()
			// returned true WITHOUT advancing offset (recordLen stayed 0),
			// so the framework re-read the same position forever. Propagate
			// instead, preserving the cause.
			throw new IOException("Failed to decode record of table '"
					+ tableName + "' at offset " + offset, e);
		}
	}

}