import java.io.IOException;
import java.util.*;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.io.*;
import org.apache.hadoop.mapred.*;
import org.apache.hadoop.util.*;
import java.io.File;

/**
 * This class runs a series of MapReduce jobs via Hadoop, analytically
 * finding a page with the longest shortest path to the Jesus article in
 * Wikipedia.
 *
 * @author Ian Voysey
 * @author Ryan Hofler
 * @version 0.1
 */
public class Jesus {

    /** Default number of DBFS passes; may be overridden by the optional third argument. */
    private static int MAX_ITS = 12;
    /** Number of reduce tasks for the Init and DBFS jobs. */
    private static final int NUMRED = 20;

    /**
     * Creates a job to produce the adjacency list, then runs MAX_ITS many
     * passes through the DBFS algorithm, and finally a job that sorts and
     * outputs the resulting path data.
     *
     * @param args command line arguments: input dir, output dir, and an
     *             optional iteration count that overrides MAX_ITS
     * @throws IOException if any of the underlying jobs fails
     */
    public static void main(String[] args) throws IOException {
        if (args.length < 2) {
            System.out.println("Usage: Jesus <input dir> <output dir> [iterations]");
            System.exit(1);
        }

        if (args.length == 3)
            MAX_ITS = Integer.parseInt(args[2]);

        // Step 1: Build the adjacency list into "<output>0"
        Init(args[0], args[1] + 0);

        // Steps 2 - MAX_ITS: each DBFS pass reads the previous pass's output
        // directory "<output>x" and writes "<output>(x+1)".
        int x;
        for (x = 0; x < MAX_ITS; x++)
            DBFS(args[1] + x, args[1] + (x + 1));

        // Step MAX_ITS+1: Sort and output the path data into the base output dir
        Finalize(args[1] + x, args[1]);
    }

    /**
     * Configures and synchronously runs a single MapReduce job.
     *
     * @param name the name of the job
     * @param input the input location
     * @param output the output location
     * @param inputFormat the format of the input
     * @param mapper the class to use as a mapper
     * @param reducer the class to use as both combiner and reducer; its
     *                operation must therefore be commutative and associative
     * @param outputKey the output key class for the job
     * @param numRed the number of reduce tasks
     * @throws IOException if an IO exception is encountered
     */
    private static void runJob(String name, String input, String output,
                               Class<? extends InputFormat> inputFormat,
                               Class<? extends Mapper> mapper,
                               Class<? extends Reducer> reducer,
                               Class<?> outputKey, int numRed) throws IOException {

        JobConf conf = new JobConf(Jesus.class);
        conf.setJobName(name);

        conf.setOutputKeyClass(outputKey);
        // All jobs in this driver emit Text values.
        conf.setOutputValueClass(Text.class);

        conf.setMapperClass(mapper);
        // The reducer doubles as the combiner to cut shuffle traffic.
        conf.setCombinerClass(reducer);
        conf.setReducerClass(reducer);

        conf.setNumReduceTasks(numRed);

        conf.setInputFormat(inputFormat);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(input));
        FileOutputFormat.setOutputPath(conf, new Path(output));

        // Blocks until the job completes; throws IOException on failure.
        JobClient.runJob(conf);
    }

    /**
     * Runs a MapReduce job to build the adjacency list from the raw XML dump.
     *
     * @param input the input directory
     * @param output the output directory
     * @throws IOException if the job fails
     */
    private static void Init(String input, String output) throws IOException {
        runJob("haysoos_Init", input, output, XmlInputFormat.class,
            JesusMapper.class, JesusReducer.class, Text.class, NUMRED);
    }

    /**
     * Runs one MapReduce pass of the distributed BFS over the adjacency list.
     *
     * @param input the input directory
     * @param output the output directory
     * @throws IOException if the job fails
     */
    private static void DBFS(String input, String output) throws IOException {
        runJob("haysoos_DBFS", input, output, TextInputFormat.class,
            DBFSMapper.class, DBFSReducer.class, Text.class, NUMRED);
    }

    /**
     * Runs a MapReduce job for the finalizing step: sorts by distance
     * (IntWritable keys) and emits the path data.
     *
     * @param input the input directory
     * @param output the output directory
     * @throws IOException if the job fails
     */
    private static void Finalize(String input, String output) throws IOException {
        runJob("haysoos_Finalize", input, output, TextInputFormat.class,
            FinalMapper.class, FinalReducer.class, IntWritable.class, MAX_ITS + 1);
    }
}
