/*
 * Cloud9: A MapReduce Library for Hadoop
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You may
 * obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0 
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package apweb;

import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Random;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Reducer.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

/**
 * <p>
 * Repartition join: joins all tagged inputs together on their configured
 * join-key columns.
 * </p>
 * 
 * <ul>
 * <li>[input-list] comma-separated input paths</li>
 * <li>[tag-list] comma-separated tags, one per input</li>
 * <li>[index-list] comma-separated join-key column indexes, one per tag</li>
 * <li>[output-path] output path</li>
 * <li>[num-reducers] number of reducers</li>
 * </ul>
 * 
 * @author Jimmy Lin
 */
public class RepartitionJoinOverRandomData extends Configured implements Tool {
	private static final Logger sLogger = Logger.getLogger(RepartitionJoinOverRandomData.class);

	// Mapper: emits (join-key, full record) for every input line. Lines are
	// tab-separated with the source tag as the last field; the join-key column
	// for each tag is taken from the "indexs" configuration entry.
	private static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {

		// Configuration values parsed once in setup() instead of on every
		// map() call.
		private String[] tags = null;
		private String[] indexs = null;

		// Reuse the output key object to save the overhead of per-record
		// object creation.
		private final Text outKey = new Text();

		@Override
		protected void setup(Context context) throws IOException, InterruptedException {
			Configuration conf = context.getConfiguration();
			tags = conf.get("tags").split(",");
			indexs = conf.get("indexs").split(",");
		}

		/**
		 * Emits (join-key, record) where the join key is the column selected
		 * by the record's tag (the last tab-separated field).
		 */
		@Override
		public void map(LongWritable key, Text value, Context context) throws IOException,
				InterruptedException {
			String[] valuelist = value.toString().split("\t"); // column1 ... columnN, tag
			int i = getIndexOfTag(tags, valuelist[valuelist.length - 1]);
			if (i < 0) {
				// Unknown tag: drop the record instead of crashing with an
				// ArrayIndexOutOfBoundsException on indexs[-1].
				return;
			}
			outKey.set(valuelist[Integer.parseInt(indexs[i])]);
			context.write(outKey, value);
		}

		/** Returns the position of {@code tag} in {@code taglist}, or -1 if absent. */
		public int getIndexOfTag(String[] taglist, String tag)
		{
			for (int i = 0; i < taglist.length; i++)
			{
				if (taglist[i].equals(tag))
					return i;
			}
			return -1;
		}
	}
	
	// Reducer: performs the actual repartition join. All records sharing a
	// join key arrive here; they are bucketed by their source tag and the
	// full cross product across all configured tags is emitted.
	private static class MyReducer extends Reducer<Text, Text, Text, Text> {

		// Records of the current key, bucketed by source tag. Reused across
		// reduce() calls to save allocation; cleared at the start of each call.
		private Hashtable<String, ArrayList<String>> Tables = new Hashtable<String, ArrayList<String>>();
		// Tag order used when concatenating fields of one output tuple.
		private ArrayList<String> keySequence = new ArrayList<String>();
		String[] tags = null;

		@Override
		public void reduce(Text key, Iterable<Text> values,
				Context context) throws IOException, InterruptedException {
			Configuration conf = context.getConfiguration();
			// Per-key "odometer": current position inside each tag's bucket.
			Hashtable<String, Integer> tableIndex = new Hashtable<String, Integer>();
			Tables.clear();
			keySequence.clear();
			tags = conf.get("tags").split(",");
			for (String tag : tags) {
				keySequence.add(tag);
			}

			// Bucket every record under this key by its tag (last column).
			for (Text v : values) {
				String value = v.toString();
				String[] valueArray = value.split("\t");
				String tag = valueArray[valueArray.length - 1];
				if (!tableIndex.containsKey(tag)) {
					tableIndex.put(tag, Integer.valueOf(0));
				}
				if (!Tables.containsKey(tag)) {
					Tables.put(tag, new ArrayList<String>());
				}
				Tables.get(tag).add(value);
			}

			if (keySequence.isEmpty() || tableIndex.isEmpty() || Tables.isEmpty()) {
				return;
			}

			// Size of the cross product; zero when some tag has no records
			// under this key (inner-join semantics).
			int outTupleSize = getJoinResultSize(keySequence, Tables);
			if (outTupleSize <= 0) {
				return;
			}
			for (int i = 0; i < outTupleSize; i++) {
				String out = getOneJoinResult(keySequence, tableIndex, Tables);
				context.write(key, new Text(out));
				computeTheNextIndexForTable(keySequence, tableIndex, Tables);
			}
		}

		/**
		 * Builds one joined tuple by combining, in tag order, the record each
		 * table's current index points at.
		 */
		private String getOneJoinResult(ArrayList<String> namelist, Hashtable<String, Integer> tableIndex, Hashtable<String, ArrayList<String>> tuples)
		{
			String left = tuples.get(namelist.get(0)).get(tableIndex.get(namelist.get(0)).intValue());
			for (int i = 1; i < namelist.size(); i++)
			{
				String tableName = namelist.get(i);
				ArrayList<String> tableRecords = tuples.get(tableName);
				int recordIndex = tableIndex.get(tableName).intValue();
				String right = tableRecords.get(recordIndex);
				left = ToolsCollections.combine(left, right);
			}
			return left;
		}

		/**
		 * Returns the product of the bucket sizes over all tags, i.e. the
		 * number of tuples the cross product will produce, or 0 when any tag
		 * has no bucket. NOTE(review): the product can overflow int for very
		 * large buckets.
		 */
		private int getJoinResultSize(ArrayList<String> namelist, Hashtable<String, ArrayList<String>> tuples)
		{
			if (tuples == null || tuples.size() <= 0)
				return 0;

			int allsize = 1;
			for (String name : namelist)
			{
				if (!tuples.containsKey(name))
					return 0;
				allsize = allsize * tuples.get(name).size();
			}
			return allsize;
		}

		/**
		 * Advances the odometer one step: increments the last tag's index and
		 * carries into the preceding tags when an index reaches the end of its
		 * bucket (resetting it to 0). The first tag's index is allowed to run
		 * past its bucket; the caller bounds iteration by the result size.
		 */
		private void computeTheNextIndexForTable(ArrayList<String> namelist, Hashtable<String, Integer> tableIndex, Hashtable<String, ArrayList<String>> tuples)
		{
			// Increment the last (fastest-moving) index.
			String last = namelist.get(namelist.size() - 1);
			tableIndex.put(last, Integer.valueOf(tableIndex.get(last).intValue() + 1));
			for (int i = namelist.size() - 1; i >= 0; i--)
			{
				String tableName = namelist.get(i);
				if (tableIndex.get(tableName).intValue() >= tuples.get(tableName).size())
				{
					if (i == 0)
						break;
					// Carry: bump the next-slower index and reset this one.
					String prev = namelist.get(i - 1);
					tableIndex.put(prev, Integer.valueOf(tableIndex.get(prev).intValue() + 1));
					tableIndex.put(tableName, Integer.valueOf(0));
				}
				else
				{
					break;
				}
			}
		}
	}

	

	/**
	 * Creates an instance of this tool. No-arg constructor required so
	 * {@link ToolRunner} can instantiate the class.
	 */
	public RepartitionJoinOverRandomData() {
	}

	/**
	 * Prints the command-line usage (plus the generic Hadoop options) and
	 * returns -1 so callers can propagate it as an error code.
	 */
	private static int printUsage() {
		// Fixed "seperated" typos in the user-facing usage message.
		System.out.println("usage: [inputlist separated by ,] [taglist separated by ,] [indexlist separated by ,] [outputpath] [reduceNumber]");
		ToolRunner.printGenericCommandUsage(System.out);
		return -1;
	}

	/**
	 * Runs this tool: configures and submits the repartition-join job.
	 *
	 * Expected arguments: [inputlist] [taglist] [indexlist] [outputpath]
	 * [reduceNumber], where the first three are comma-separated lists of
	 * equal length (one tag and one join-key column index per input).
	 *
	 * @return 0 on job success, non-zero on bad arguments or job failure
	 */
	public int run(String[] args) throws Exception {
		if (args.length != 5) {
			printUsage();
			return -1;
		}

		String inputlist = args[0];
		String taglist = args[1];
		String indexs = args[2];
		String outputPath = args[3];
		int reduceTasks = Integer.parseInt(args[4]);

		sLogger.info("Tool: RepartitionJoinOverRandomData");
		sLogger.info(" - input path: " + inputlist);
		sLogger.info(" - taglist: " + taglist);
		sLogger.info(" - indexlist: " + indexs);
		sLogger.info(" - number of reduceTasks: " + reduceTasks);

		Configuration conf = new Configuration();
		// Hand the tag and join-key-column lists to the mapper/reducer
		// through the job configuration.
		conf.set("tags", taglist);
		conf.set("indexs", indexs);

		Job job = new Job(conf, "RepartitionJoinOverRandomData");
		job.setJarByClass(RepartitionJoinOverRandomData.class);

		job.setNumReduceTasks(reduceTasks);

		FileInputFormat.setInputPaths(job, inputlist); // accepts a comma-separated list
		FileOutputFormat.setOutputPath(job, new Path(outputPath));

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);

		job.setMapperClass(MyMapper.class);
		// No combiner: the join is not an associative/commutative aggregation.
		job.setReducerClass(MyReducer.class);

		// Delete the output directory if it exists already.
		Path outputDir = new Path(outputPath);
		FileSystem.get(conf).delete(outputDir, true);

		long startTime = System.currentTimeMillis();
		// Propagate the job's success/failure instead of always returning 0.
		boolean success = job.waitForCompletion(true);
		sLogger.info("Job RepartitionJoinOverRandomData finished in "
				+ (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

		return success ? 0 : 1;
	}

	/**
	 * Dispatches command-line arguments to the tool via the
	 * <code>ToolRunner</code>, exiting with the tool's return code.
	 */
	public static void main(String[] args) throws Exception {
		RepartitionJoinOverRandomData tool = new RepartitionJoinOverRandomData();
		System.exit(ToolRunner.run(new Configuration(), tool, args));
	}
}
