/*************************************************************************************
* 	 Copyright (C) 2010 by Information Systems Group, Saarland University  			*
*    http://infosys.cs.uni-saarland.de												*
* 	 																				*
* 	 This file is part of Hadoop++.												 	*
*																					*
*    Hadoop++ is free software: you can redistribute it and/or modify				*
*    it under the terms of the GNU Lesser General Public License as published by	*
*    the Free Software Foundation, either version 3 of the License, or				*
*    (at your option) any later version.											*
*																					*
*    Hadoop++ is distributed in the hope that it will be useful,					*
*    but WITHOUT ANY WARRANTY; without even the implied warranty of					*
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the					*
*    GNU Lesser General Public License for more details.							*
*																					*
*    You should have received a copy of the GNU Lesser General Public License		*
*    along with Hadoop++.  If not, see <http://www.gnu.org/licenses/>.				*
*************************************************************************************/
package unisb.cs.core.binary.converter;

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Partitioner;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

import unisb.cs.core.binary.utils.BinaryUtils;
import unisb.cs.data.tables.TableObjectProxy;

/**
 * This class loads the text data as binary. Following is how the binary data is formatted:
 * 
 *  ______________________________________________________________________________________________________________
 * |             |         |                               |              										  |
 * | Record Size | Lineage | Lengths of VARCHAR attributes | Attribute values (in the same sequence as in schema) |
 * |  1 byte     |  1 Bit  | sum_i (ceil(log2 varchar_i))  |		size of (fixed length + varchar) attributes	  |
 * |_____________|_________|_______________________________|______________________________________________________|
 * 
 * The Record Size tells the number of bytes in the record.
 * The Lineage bit is used to identify the relation in case of co-grouped data.
 * The lengths of VARCHAR attributes are stored in the same sequence as they appear in the schema.
 * Finally, the attribute values are stored, again the same sequence as they appear in the schema.
 * Note, that the record (excluding the record size byte) is aligned to the next byte boundary.
 * 
 * The Mappers processing such data need to:
 * 	1. First, read the record size byte and then the actual record.
 *  2. Identify the relation from the lineage.
 *  3. Read the actual lengths of VARCHAR attributes in that relation.
 *  4. Read and unpack the actual values of referenced attributes.
 * 
 */
public class BinaryConverter extends Configured implements Tool {

	/**
	 * Partitioner to ensure that the input data splits
	 * get evenly distributed across all reducers.
	 *
	 */
	public static class FirstPartitioner implements Partitioner<Text, Text> {
		public void configure(JobConf job) {
		}

		/**
		 * Round-robin partitioning on the split identifier.
		 * (First 8 bytes in the key contain the split identifier)
		 *
		 * The remainder is normalized to be non-negative: a negative split
		 * identifier would otherwise produce a negative partition number,
		 * which the framework rejects.
		 *
		 * @param key           composite key; bytes [0,8) hold the split id
		 * @param value         tuple payload (unused for partitioning)
		 * @param numPartitions number of reduce tasks
		 * @return partition index in [0, numPartitions)
		 */
		public int getPartition(Text key, Text value, int numPartitions) {
			int partition = (int) (BinaryUtils.getLong(key.getBytes(), 0) % numPartitions);
			return (partition < 0) ? partition + numPartitions : partition;
		}
	}

	/**
	 * Key comparator to preserve the input data ordering.
	 * i.e. do not mix tuples across splits nor change the order of tuples in the dataset.
	 *
	 */
	public static class KeyComparator extends WritableComparator {
		protected KeyComparator() {
			super(Text.class, true);
		}

		/**
		 * Orders keys first by split identifier, then by the tuple offset
		 * within that split.
		 * (First 8 bytes in key contain the split identifier, following 8 bytes the tuple offset)
		 */
		@SuppressWarnings("unchecked")
		@Override
		public int compare(WritableComparable w1, WritableComparable w2) {
			byte[] left = ((Text) w1).getBytes();
			byte[] right = ((Text) w2).getBytes();
			// split identifier decides first; fall back to the tuple offset
			int bySplit = compareBytes(left, 0, 8, right, 0, 8);
			return (bySplit != 0) ? bySplit : compareBytes(left, 8, 8, right, 8, 8);
		}
	}

	/**
	 * Group comparator to ensure that the entire input split ends up in the same reduce call.
	 * This is needed because since the binary data size is different, we need to mark the split boundaries.
	 *
	 */
	public static class GroupComparator extends WritableComparator {
		protected GroupComparator() {
			super(Text.class, true);
		}

		/**
		 * Groups keys by split identifier only, ignoring the tuple offset.
		 * (First 8 bytes in the key contain the split identifier)
		 */
		@SuppressWarnings("unchecked")
		@Override
		public int compare(WritableComparable w1, WritableComparable w2) {
			byte[] left = ((Text) w1).getBytes();
			byte[] right = ((Text) w2).getBytes();
			return compareBytes(left, 0, 8, right, 0, 8);
		}
	}

	/**
	 * The Map class splits the incoming tuple using value delimiter,
	 * converts the text data into binary, and outputs the binary data.
	 *
	 */
	public static class TextMap extends MapReduceBase implements Mapper<Text, Text, Text, Text> {

		/** Counter group used to surface conversion failures in the job UI. */
		private static final String COUNTER_GROUP = "BinaryConverter";

		/** Counter for tuples that could not be converted to binary. */
		private static final String BAD_RECORDS = "BAD_RECORDS";

		/** Name of the table whose schema drives the text-to-binary conversion. */
		private String tableName;

		/**
		 * Reads the target table name from the job configuration
		 * (set by {@link BinaryConverter#run(String[])} under "table.name").
		 */
		public void configure(JobConf job){
			tableName = job.get("table.name");
		}

		/**
		 * Converts one delimited text tuple into its binary representation.
		 * The key (split identifier + tuple offset) is passed through unchanged;
		 * the value is replaced by the binary-encoded record.
		 *
		 * @param key composite key containing the split identifier and the tuple offset
		 * @param value delimited tuple attribute values
		 * @throws IOException if the output collector fails; such failures are
		 *         propagated to the framework instead of being swallowed
		 */
		public void map(Text key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			if (value == null) {
				return;
			}
			try {
				// NOTE(review): a fresh proxy per record mirrors the original code;
				// if TableObjectProxy is safely reusable it could be cached in configure().
				TableObjectProxy proxy = new TableObjectProxy(tableName);
				proxy.setRecord(value.toString());
				output.collect(key, new Text(proxy.getBytes()));
			}
			catch (IOException e) {
				// Collector failures are framework errors, not bad records:
				// previously these were swallowed, silently losing data.
				throw e;
			}
			catch (Exception e) {
				// Malformed record: make the failure visible via a counter
				// instead of silently dropping the tuple.
				reporter.incrCounter(COUNTER_GROUP, BAD_RECORDS, 1);
				e.printStackTrace();
			}
		}
	}

	
	/**
	 * The Reduce class outputs the binary split data,
	 * and appends the split with a footer containing the split size.
	 * 
	 *  _____________________________________________________________________
	 * |													  |				 |
	 * |                Split Data 		                      | Split Footer |
	 * |				 (Binary)							  |   (8 bytes)  |
	 * |______________________________________________________|______________|
	 * 
	 * To process such data:
	 *  1. First, read the split footers in the data file (in reverse order)
	 *  2. For each split footer encountered, create the corresponding data split
	 *     to be processed by the mapper. 
	 *
	 */
	// Using GroupComparator guarantees that all data belonging to one split
	// is processed within a single reduce call.
	public static class TextReduce extends MapReduceBase implements Reducer<Text, Text, Text, Text> {

		/**
		 * Each reduce call receives one full split.
		 * The reduce function streams the input data to HDFS.
		 * Additionally, it counts the size of the split data
		 * and emits it as split footer in the end.
		 */
		public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
			long splitBytes = 0;
			while (values.hasNext()) {
				Text tuple = values.next();
				splitBytes += tuple.getLength();
				output.collect(key, tuple);
			}
			// append the 8-byte footer carrying the total split size
			output.collect(key, new Text(BinaryUtils.toBytes(splitBytes)));
		}
	}

	/**
	 * Configures and submits the text-to-binary conversion job.
	 *
	 * Expected positional arguments:
	 *   args[0] - table name (stored in the job conf as "table.name")
	 *   args[1] - input path
	 *   args[2] - output path
	 *   args[3] - maximum tuple size (string length) of the input data
	 *   args[4] - number of reduce tasks
	 *
	 * @return 0 on successful job completion
	 * @throws Exception if the job fails
	 */
	public int run(String[] args) throws Exception {
		// Guard against missing arguments: without this, args[3]/args[4]
		// would fail with a bare ArrayIndexOutOfBoundsException.
		if (args.length < 5) {
			throw new IllegalArgumentException(
					"Usage: <table_name> <input_path> <output_path> <max_tuple_size> <num_reducers>");
		}

		JobConf conf = new JobConf(getConf(), BinaryConverter.class);
		conf.setJobName(this.getClass().getName());

		conf.setPartitionerClass(FirstPartitioner.class);				// sh
		conf.setOutputKeyComparatorClass(KeyComparator.class);			// cmp
		conf.setOutputValueGroupingComparator(GroupComparator.class);	// grp

		conf.setMapperClass(TextMap.class);							// map
		conf.setReducerClass(TextReduce.class);						// reduce
		conf.setInputFormat(BinaryConverterInputFormat.class);
		conf.setOutputFormat(BinaryConverterOutputFormat.class);

		BinaryConverterInputFormat.setInputPaths(conf, args[1]); 				// input path
		BinaryConverterOutputFormat.setOutputPath(conf, new Path(args[2])); 	// output path

		// Table whose schema the mappers use to encode each tuple.
		conf.set("table.name", args[0]);

		/**
		 * The maximum possible tuple size (in terms of string length)
		 * of the input data. This is used by the input formatter to split
		 * the data at record boundaries i.e. each mapper gets an integral
		 * number of tuples to process.
		 */
		conf.set(BinaryConverterInputFormat.MAX_TUPLE_SIZE, args[3]);

		conf.setMapOutputKeyClass(Text.class);
		conf.setMapOutputValueClass(Text.class);
		conf.setOutputKeyClass(Text.class);
		conf.setOutputValueClass(Text.class);

		conf.setNumReduceTasks(Integer.parseInt(args[4]));
		JobClient.runJob(conf);
		return 0;
	}

	/**
	 * Command-line entry point. Validates the arguments, runs the
	 * map-reduce job via {@link ToolRunner}, and prints the job runtime
	 * in seconds.
	 */
	public static void main(String[] args) throws Exception {
		// run() consumes exactly five positional parameters (args[0]..args[4]);
		// the previous check for only 3 let the job start and crash later.
		if(args.length < 5){
			System.out.println("Need the following command line parameters:");
			System.out.println("  <table_name> <input_path> <output_path> <max_tuple_size> <num_reducers>");
			System.out.println("Also, note that this class makes the following assumptions:");
			System.out.println("1. Input data file(s) is(are) in ascii text.");
			System.out.println("2. Each line in the input file(s) corresponds to one table row.");
			System.out.println("3. Attributes within a row are pipe (|) delimited.");
			System.exit(1);
		}

		// run the map-reduce job
		long t1 = System.currentTimeMillis();
		int res = ToolRunner.run(new Configuration(), new BinaryConverter(), args);
		long t2 = System.currentTimeMillis();

		// print the job runtime (seconds)
		System.out.println((float) (t2 - t1) / 1000);

		System.exit(res);
	}
}
