/*
 * Cloud9: A MapReduce Library for Hadoop
 * 
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You may
 * obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0 
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package apweb;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.net.URI;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Random;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.Reducer.Context;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

/**
 * <p>
 * Joins two tables with a map-side join: the small table is shipped to every
 * mapper through the DistributedCache and held in memory, so no reduce phase
 * is needed.
 * </p>
 * 
 * <ul>
 * <li>[largeTable] path of the large input table</li>
 * <li>[smallTable] path of the small table (loaded into each mapper's memory)</li>
 * <li>[leftIndex,rightIndex] join-column indexes of the large and small table</li>
 * <li>[leftTag,rightTag] record tags identifying which table a row came from</li>
 * <li>[outputpath] output path</li>
 * </ul>
 * 
 * @author Jimmy Lin
 */
public class MapJoinDistributedOverMapReduce extends Configured implements Tool {
	private static final Logger sLogger = Logger.getLogger(MapJoinDistributedOverMapReduce.class);

	// mapper: emits (token, 1) for every word occurrence
	// Mapper: loads the small (cached) table into an in-memory hash table once,
	// then streams the large table and emits (join-key, combined-record) pairs.
	private static class MyMapper extends Mapper<LongWritable, Text, Text, Text> {

		// Static so the table is built only once per JVM even when the framework
		// reuses the JVM for several map tasks (guarded in before_map()).
		private static Hashtable<String, ArrayList<String>> hashTable = new Hashtable<String, ArrayList<String>>();
		String[] tags = null;
		String leftTag = null;
		int leftIndex = 0;
		int rightIndex = 0;
		private Path[] localFiles = null;

		/**
		 * Lazily populates {@link #hashTable} from every DistributedCache file,
		 * keyed by the small table's join column ({@code rightIndex}).
		 * No-op if the table has already been built by a previous call.
		 */
		public void before_map(Context context)
		{
			if (hashTable != null && hashTable.size() > 0)
			{
				System.out.println("There are records in hashtable: " + hashTable.size());
				return;
			}
			// begin to read the small table
			Configuration conf = context.getConfiguration();
			try {
				localFiles = DistributedCache.getLocalCacheFiles(conf);
			} catch (IOException e1) {
				e1.printStackTrace();
			}
			if (localFiles == null || localFiles.length <= 0)
			{
				System.out.println("distributedCache files not found");
				return;
			}
			System.out.println("distributedCache files size: " + localFiles.length);

			rightIndex = conf.getInt("rightIndex", 0);

			try {
				/*
				 * Read all cached files from the local node. BUG FIX: the original
				 * loop started at i=1 and silently dropped the first cache file;
				 * the driver already excludes the non-data entry before caching, so
				 * every element of getLocalCacheFiles() is a data file — start at 0.
				 */
				System.out.println("distributedcache files:" + localFiles.length);
				for (int i = 0; i < localFiles.length; i++)
				{
					System.out.println("file:" + i + "\t" + localFiles[i]);
					// read this file into the hash table, closing the reader even
					// if a read fails (the original leaked it on IOException)
					BufferedReader bufReader = new BufferedReader(new FileReader(localFiles[i].toString()));
					try {
						String line = bufReader.readLine();
						while (line != null)
						{
							String[] tmp = line.trim().split("\t");
							ArrayList<String> bucket = hashTable.get(tmp[rightIndex]);
							if (bucket == null)
							{
								bucket = new ArrayList<String>();
								hashTable.put(tmp[rightIndex], bucket);
							}
							bucket.add(line.trim());
							line = bufReader.readLine();
						}
					} finally {
						bufReader.close();
					}
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}

		/**
		 * Joins one record of the large table against the in-memory small table.
		 * Input lines are expected as tab-separated columns with the table tag in
		 * the last column; only records carrying the left tag are joined, and one
		 * output pair is emitted per matching small-table row.
		 */
		@Override
		public void map(LongWritable key, Text value, Context context) throws IOException,
				InterruptedException {
			Configuration conf = context.getConfiguration();
			before_map(context);
			if (hashTable == null || hashTable.size() <= 0)
			{
				// nothing to join against; drop the record
				return;
			}
			System.out.println("hashtable size:" + hashTable.size());

			tags = conf.get("tags").split(",");
			leftTag = tags[0].trim();
			leftIndex = conf.getInt("leftIndex", 0);
			rightIndex = conf.getInt("rightIndex", 0);

			String[] valuelist = value.toString().split("\t"); // column1,column2,column3,tag

			// only rows tagged as the left (large) table participate in the join
			if (!valuelist[valuelist.length - 1].trim().equals(leftTag))
			{
				return;
			}
			String tmpKey = valuelist[leftIndex];
			if (!hashTable.containsKey(tmpKey))
				return;
			// hoist the key Text out of the loop instead of rebuilding it per match
			Text outKey = new Text(tmpKey);
			for (String leftrecord : hashTable.get(tmpKey))
			{
				context.write(outKey, new Text(ToolsCollections.combine(value.toString().trim(), leftrecord.trim())));
			}
		}

	}

	// Prints the expected command line plus the generic Hadoop options,
	// and returns -1 so callers can use it directly as an exit status.
	private static int printUsage() {
		System.out.println("usage: [largeTable] [smallTable] [leftIndex,rightIndex] [leftTag,rightTag] [outputpath] ");
		ToolRunner.printGenericCommandUsage(System.out);
		return -1;
	}

	/**
	 * Runs this tool.
	 */
	/**
	 * Runs this tool: parses the five command-line arguments, registers every
	 * small-table file in the DistributedCache, and launches a map-only join job.
	 *
	 * @param args [largeTable] [smallTable] [leftIndex,rightIndex] [leftTag,rightTag] [outputpath]
	 * @return 0 on success, non-zero on bad arguments or job failure
	 */
	public int run(String[] args) throws Exception {
		if (args.length != 5) {
			printUsage();
			return -1;
		}

		String largeInput = args[0];
		String smallInput = args[1];
		// parse "leftIndex,rightIndex" and "leftTag,rightTag" once
		String[] indexPair = args[2].split(",");
		int leftIndex = Integer.parseInt(indexPair[0]);
		int rightIndex = Integer.parseInt(indexPair[1]);
		String[] tagPair = args[3].split(",");
		String leftTag = tagPair[0];
		String rightTag = tagPair[1];

		String output = args[4];

		sLogger.info("Tool: MapJoinDistributedOverMapReduce");
		sLogger.info(" - largeInput path: " + largeInput);
		sLogger.info(" - smallInput path: " + smallInput);
		sLogger.info(" - index: " + leftIndex + "\t" + rightIndex);
		sLogger.info(" - tags: " + leftTag + "\t" + rightTag);
		sLogger.info(" - output: " + output);

		Configuration conf = new Configuration();

		conf.set("tags", args[3]);
		conf.set("index", args[2]);
		// BUG FIX: MyMapper reads the integer keys "leftIndex"/"rightIndex" via
		// conf.getInt(...), but only the combined "index" string was ever set,
		// so the mapper always fell back to column 0. Publish the parsed values.
		conf.setInt("leftIndex", leftIndex);
		conf.setInt("rightIndex", rightIndex);

		/*
		 * Register each small-table file in the DistributedCache so every mapper
		 * can load it locally.
		 */
		FileSystem fs = FileSystem.get(conf);
		FileStatus[] fst = fs.listStatus(new Path(smallInput));
		DistributedCache.createSymlink(conf);
		for (int i = 1; i < fst.length; i++) // i=0 references to the file .log
		{
			System.out.println("Add file into cache: " + fst[i].getPath().toString());
			DistributedCache.addCacheFile(fst[i].getPath().toUri(), conf);
		}
		System.out.println("Working path: " + fs.getWorkingDirectory());

		Job job = new Job(conf, "MapJoinDistributedOverMapReduce");
		job.setJarByClass(MapJoinDistributedOverMapReduce.class);

		FileInputFormat.addInputPath(job, new Path(largeInput)); // add input path
		FileOutputFormat.setOutputPath(job, new Path(output));

		job.setOutputKeyClass(Text.class);
		job.setOutputValueClass(Text.class);

		job.setMapperClass(MyMapper.class);
		job.setNumReduceTasks(0); // map-side join: no reduce phase

		// Delete the output directory if it exists already
		Path outputDir = new Path(output);
		fs.delete(outputDir, true);

		long startTime = System.currentTimeMillis();
		boolean success = job.waitForCompletion(true);
		sLogger.info("Job TestMapReduce Finished in " + (System.currentTimeMillis() - startTime) / 1000.0
				+ " seconds");

		// propagate the job status instead of unconditionally returning 0
		return success ? 0 : 1;
	}

	/**
	 * Dispatches command-line arguments to the tool via the
	 * <code>ToolRunner</code>.
	 */
	/**
	 * Command-line entry point; delegates argument handling to
	 * <code>ToolRunner</code> and exits with the tool's return code.
	 */
	public static void main(String[] args) throws Exception {
		System.exit(ToolRunner.run(new Configuration(), new MapJoinDistributedOverMapReduce(), args));
	}
}
