/**
 * Software License, Version 1.0
 * 
 * Copyright 2003 The Trustees of Indiana University.  All rights reserved.
 * 
 *
 *Redistribution and use in source and binary forms, with or without 
 *modification, are permitted provided that the following conditions are met:
 *
 *1) All redistributions of source code must retain the above copyright notice,
 * the list of authors in the original source code, this list of conditions and
 * the disclaimer listed in this license;
 *2) All redistributions in binary form must reproduce the above copyright 
 * notice, this list of conditions and the disclaimer listed in this license in
 * the documentation and/or other materials provided with the distribution;
 *3) Any documentation included with all redistributions must include the 
 * following acknowledgement:
 *
 *"This product includes software developed by the Community Grids Lab. For 
 * further information contact the Community Grids Lab at 
 * http://communitygrids.iu.edu/."
 *
 * Alternatively, this acknowledgement may appear in the software itself, and 
 * wherever such third-party acknowledgments normally appear.
 * 
 *4) The name Indiana University or Community Grids Lab or NaradaBrokering, 
 * shall not be used to endorse or promote products derived from this software 
 * without prior written permission from Indiana University.  For written 
 * permission, please contact the Advanced Research and Technology Institute 
 * ("ARTI") at 351 West 10th Street, Indianapolis, Indiana 46202.
 *5) Products derived from this software may not be called NaradaBrokering, 
 * nor may Indiana University or Community Grids Lab or NaradaBrokering appear
 * in their name, without prior written permission of ARTI.
 * 
 *
 * Indiana University provides no reassurances that the source code provided 
 * does not infringe the patent or any other intellectual property rights of 
 * any other entity.  Indiana University disclaims any liability to any 
 * recipient for claims brought by any other entity based on infringement of 
 * intellectual property rights or otherwise.  
 *
 *LICENSEE UNDERSTANDS THAT SOFTWARE IS PROVIDED "AS IS" FOR WHICH NO 
 *WARRANTIES AS TO CAPABILITIES OR ACCURACY ARE MADE. INDIANA UNIVERSITY GIVES
 *NO WARRANTIES AND MAKES NO REPRESENTATION THAT SOFTWARE IS FREE OF 
 *INFRINGEMENT OF THIRD PARTY PATENT, COPYRIGHT, OR OTHER PROPRIETARY RIGHTS. 
 *INDIANA UNIVERSITY MAKES NO WARRANTIES THAT SOFTWARE IS FREE FROM "BUGS", 
 *"VIRUSES", "TROJAN HORSES", "TRAP DOORS", "WORMS", OR OTHER HARMFUL CODE.  
 *LICENSEE ASSUMES THE ENTIRE RISK AS TO THE PERFORMANCE OF SOFTWARE AND/OR 
 *ASSOCIATED MATERIALS, AND TO THE PERFORMANCE AND VALIDITY OF INFORMATION 
 *GENERATED USING SOFTWARE.
 */
package cgl.hadoop.apps.cap3;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.SequenceFile.CompressionType;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
/**
 * Cap3 Data analysis using Hadoop MapReduce.
 * This program demonstrates the use of a "map-only" operation to execute
 * a data analysis application on a collection of data files.
 * 
 * Cap3 is a gene sequencing program which consumes a .fsa file and 
 * produces several output files along with the standard out.
 * 
 * The data is placed in a shared file system (or can be placed on all the local disks)
 * and the file names are written to HDFS. For Hadoop, the data file names become
 * the data.
 * 
 * Hadoop executes each map task by passing a data file name as the value parameter.
 * Each map task executes the Cap3 program (written in C) and saves the standard
 * output into a file. It can also be used to copy these output files to a
 * predefined location.
 * 
 * @author Jaliya Ekanayake (jekanaya@cs.indiana.edu)
 * 03/03/2009
 *
 */
public class Cap3Analysis extends Configured implements Tool {

	/** Job configuration key: directory where Cap3 output files are placed. */
	public static final String prop_output_dir = "prop_out_dir";
	/** Job configuration key: name of the Cap3 executable. */
	public static final String prop_exec_name = "prop_exec_name";
	/** Job configuration key: directory containing the Cap3 executable. */
	public static final String prop_cap3_dir = "prop_cap3_dir";
	/** Job configuration key: command line arguments passed to Cap3. */
	public static final String prop_cmd_args = "prop_cmd_args";
	/** Job configuration key: separator used to join file names into one record. */
	public static final String prop_sep_pattern = "prop_sep_pattern";
	/** Separator placed between file names packed into a single Text value. */
	public static final String sharp = "#";

	/**
	 * Joins the elements of {@code array} into one string, inserting
	 * {@code pattern} between consecutive elements (no trailing separator).
	 *
	 * @param array - Strings to join (may be empty, must not be null).
	 * @param pattern - Separator inserted between elements.
	 * @return The joined string; empty when the array is empty.
	 */
	private String getPatternSeperatedStringFromStringArray(String[] array,
			String pattern) {
		// StringBuilder: this buffer is method-local, no synchronization needed.
		StringBuilder sb = new StringBuilder();
		for (int i = 0; i < array.length; i++) {
			if (i > 0) {
				sb.append(pattern);
			}
			sb.append(array[i]);
		}
		return sb.toString();
	}

	/**
	 * Launch the MapReduce computation.
	 * This method first removes any previous working directory and creates a
	 * new one, then writes the grouped data file names to HDFS (one
	 * SequenceFile per map task) and runs the map-only computation.
	 *
	 * @param numMapTasks - Number of map tasks.
	 * @param numReduceTasks - Number of reduce tasks (0 for this map-only job).
	 * @param cap3Dir - The directory where the Cap3 program is.
	 * @param execName - Name of the executable.
	 * @param dataDir - Directory where the data is located.
	 * @param outputDir - Output directory to place the output.
	 * @param cmdArgs - Command line arguments to the Cap3 program.
	 * @return 0 when the job completes successfully, 1 otherwise.
	 * @throws Exception - Any exception thrown while setting up or running the job.
	 */
	int launch(int numMapTasks, int numReduceTasks, String cap3Dir,
			String execName, String dataDir, String outputDir, String cmdArgs)
			throws Exception {

		Configuration conf = new Configuration();
		Job job = new Job(conf, "cap3-data-analysis");

		// First get the file system handler, delete any previous files, add the
		// files and write the data to it, then pass its name as a parameter to
		// the job.
		Path hdMainDir = new Path("cap3-mr");
		FileSystem fs = FileSystem.get(conf);
		fs.delete(hdMainDir, true);

		Path hdInputDir = new Path(hdMainDir, "data");

		if (!fs.mkdirs(hdInputDir)) {
			// Report the directory we actually failed to create (the original
			// message incorrectly named dataDir here).
			throw new IOException("Mkdirs failed to create " + hdInputDir);
		}

		List<String[]> fileLists = getSubListsOfFiles(dataDir, numMapTasks);

		// One SequenceFile per map task; its single record holds all the data
		// file names for that task, joined with the separator pattern.
		int count = 0;
		for (String[] files : fileLists) {
			String dataFiles = getPatternSeperatedStringFromStringArray(files,
					sharp);
			Path vFile = new Path(hdInputDir, "data_file" + count);
			SequenceFile.Writer vWriter = SequenceFile.createWriter(fs, conf,
					vFile, IntWritable.class, Text.class, CompressionType.NONE);
			try {
				vWriter.append(new IntWritable(count), new Text(dataFiles));
			} finally {
				// Close even when append fails so the writer never leaks.
				vWriter.close();
			}
			count++;
		}

		System.out.println("INFO: Wrote data file names to the HDFS ");
		Path hdOutDir = new Path(hdMainDir, "out");

		// Starting the data analysis: hand the Cap3 settings to the mappers
		// through the job configuration.
		Configuration jc = job.getConfiguration();

		jc.set(prop_output_dir, outputDir);
		jc.set(prop_exec_name, execName);
		jc.set(prop_cap3_dir, cap3Dir);
		jc.set(prop_cmd_args, cmdArgs);
		jc.set(prop_sep_pattern, sharp);

		FileInputFormat.setInputPaths(job, hdInputDir);
		FileOutputFormat.setOutputPath(job, hdOutDir);

		job.setJarByClass(Cap3Analysis.class);
		job.setMapperClass(Cap3Map.class);
		job.setOutputKeyClass(IntWritable.class);
		job.setOutputValueClass(Text.class);
		job.setInputFormatClass(SequenceFileInputFormat.class);
		job.setOutputFormatClass(SequenceFileOutputFormat.class);
		// Honor the caller-supplied reduce count instead of hard-coding 0.
		job.setNumReduceTasks(numReduceTasks);
		long startTime = System.currentTimeMillis();

		// Return the status instead of calling System.exit() here, so that
		// run()/ToolRunner can propagate the exit code normally.
		int exitStatus = job.waitForCompletion(true) ? 0 : 1;
		System.out.println("Job Finished in "
				+ (System.currentTimeMillis() - startTime) / 1000.0
				+ " seconds");
		return exitStatus;
	}

	/**
	 * Parses the command line arguments and launches the job.
	 *
	 * @param args - cap3Dir, execName, dataDir, outputDir, numMapTasks.
	 * @return 0 on success, non-zero on failure or bad usage.
	 * @throws Exception - Any exception thrown while running the job.
	 */
	public int run(String[] args) throws Exception {
		if (args.length < 5) {
			System.err
					.println("Usage: Cap3Analysis <Cap3 dir> <Executable name> <Data dir> <Output dir> <Number of map tasks>");
			ToolRunner.printGenericCommandUsage(System.err);
			return -1;
		}
		String cap3Dir = args[0];
		String execName = args[1];
		String dataDir = args[2];
		String outputDir = args[3];
		String cmdArgs = "-p 95 -o 49 -t 100";
		int numMapTasks = Integer.parseInt(args[4]);
		int numReduceTasks = 0; // We don't need reduce here (map-only job).

		return launch(numMapTasks, numReduceTasks, cap3Dir, execName, dataDir,
				outputDir, cmdArgs);
	}

	/**
	 * Program entry point; delegates to {@link ToolRunner} so the generic
	 * Hadoop options are parsed before {@link #run(String[])} is invoked.
	 */
	public static void main(String[] argv) throws Exception {

		int res = ToolRunner.run(new Configuration(), new Cap3Analysis(), argv);
		System.exit(res);
	}

	/**
	 * Get a list of files from a directory and groups them. The number of
	 * groups is specified as an input parameter. When the file count does not
	 * divide evenly, the first (count % numGroups) groups receive one extra
	 * file each.
	 *
	 * @param dataDir
	 *            - Directory where the data is available.
	 * @param numGroups
	 *            - How many groups (must be positive).
	 * @return - A List of arrays of dataDir-prefixed file paths, or null when
	 *         dataDir cannot be listed (does not exist / not a directory).
	 * @throws IOException
	 *             - When numGroups is not positive.
	 */
	public static List<String[]> getSubListsOfFiles(String dataDir,
			int numGroups) throws IOException {

		File dir = new File(dataDir);
		// Assume that the directory contains only the data files.
		String[] files = dir.list();
		// BUG FIX: File.list() returns null when dataDir does not exist or is
		// not a directory; the original code read files.length before this
		// check and threw a NullPointerException in that case.
		if (files == null) {
			return null;
		}
		if (numGroups <= 0) {
			// Fail with a clear message instead of ArithmeticException below.
			throw new IOException("Number of groups must be positive: "
					+ numGroups);
		}

		int perGroup = files.length / numGroups;
		int remainder = files.length % numGroups;
		List<String[]> results = new ArrayList<String[]>();

		int start = 0;
		for (int i = 0; i < numGroups; i++) {
			// The first `remainder` groups get one extra file.
			int size = perGroup + (i < remainder ? 1 : 0);
			String[] group = new String[size];
			for (int j = 0; j < size; j++) {
				group[j] = dataDir + File.separator + files[start + j];
			}
			start += size;
			results.add(group);
		}
		return results;
	}
}
