/**
 * 
 */
package edu.cornell.cs.lsi.mapreduce.pass2;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.StringTokenizer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/**
 * This class implements the Second Pass of MapReduce task. This pass will read in all the points(on the boundary) and their root vertices.
 * The Reducer task will compute the connected components, with the key vertices being on the boundary. 
 * @author poornima
 *
 */
public class MapReduceSecondPass {

	/**
	 * Name of the MapReduce job.
	 */
	private static final String SECOND_PASS = "secondPass";

	/**
	 * Mapper for the second pass (new Hadoop API — extends {@code Mapper}, not the
	 * old-API {@code MapReduceBase}). It forwards each input point/root pair to the
	 * reducer unchanged, keyed by the point.
	 * @author poornima
	 */
    public static class Map extends Mapper<Object, Text, Text, Text> {

    /**
     * Emits (point, root) pairs. Input line format:
     * {@code <x1><space><y1><space><x2><space><y2>} where (x1,y1) is the point and
     * (x2,y2) its current root vertex.
     * Lines with fewer than four tokens are silently skipped instead of throwing
     * {@code IndexOutOfBoundsException}.
     */
     @Override
     public void map(Object key, Text value, Context context
     ) throws IOException, InterruptedException {
        // read the line and tokenize
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line);
        List<String> tokens = new ArrayList<String>();
        while (tokenizer.hasMoreTokens()) {
        	tokens.add(tokenizer.nextToken());
        }
        // guard: skip malformed lines rather than crash the task
        if (tokens.size() < 4) {
        	System.out.println("--------- MAPPER: skipping malformed line: " + line);
        	return;
        }
        // key = the point itself, value = its current root; format unchanged
        Text outputKey = new Text(tokens.get(0)+" "+tokens.get(1));
        Text outputValue = new Text(tokens.get(2)+" "+tokens.get(3));
        System.out.println("--------- MAPPER: key:"+outputKey+" value:"+outputValue);
        context.write(outputKey,outputValue);
      }
    }

    /**
     * Reducer for the second pass (new Hadoop API — extends {@code Reducer}).
     * For each boundary point it unions the point with every root reported for it,
     * then emits the point together with its final root after union-find.
     * @author poornima
     *
     */
    public static class Reduce extends Reducer<Text, Text, Text, Text> {

      /**
       * Unions {@code key} (a boundary point) with each root vertex in
       * {@code values}, then outputs the point and its resulting root.
       * A fresh {@code UnionFindImpl} is used per key, matching the original
       * per-reduce-call scope.
       */
      @Override
      public void reduce(Text key, Iterable<Text> values,
              Context context
              ) throws IOException, InterruptedException {
    	  UnionFindImpl ufImpl = new UnionFindImpl();
    	  System.out.println("------- DEBUG: Reducer starts: size of UF table : "+ufImpl.getComponentTable().size());
    	  // read the key and tokenize to extract the key vertex
        StringTokenizer tokenizer = new StringTokenizer(key.toString());
        List<String> keyTokens = new ArrayList<String>();
        while(tokenizer.hasMoreTokens()){
        	keyTokens.add(tokenizer.nextToken());
        }

        Point keyPoint = new Point(Double.parseDouble(keyTokens.get(0)),Double.parseDouble(keyTokens.get(1)));

        // For every reported root, union it with the key point.
        // BUG FIX: the original accumulated all values' tokens into one shared list
        // but always read indices 0 and 1, so every iteration after the first
        // re-used the FIRST value's root. Each value now gets its own token list.
        for(Text value : values) {
        	tokenizer = new StringTokenizer(value.toString());
        	List<String> rootTokens = new ArrayList<String>();
        	while(tokenizer.hasMoreTokens()){
            	rootTokens.add(tokenizer.nextToken());
            }
        	Point rootPt = new Point(Double.parseDouble(rootTokens.get(0)),Double.parseDouble(rootTokens.get(1)));
        	System.out.println("--- vertex:("+keyPoint.getX()+","+keyPoint.getY()+")   root:("+rootPt.getX()+","+rootPt.getY()+")");
        	// do union find
        	ufImpl.doUnionFind(keyPoint, rootPt);
        }

        Text outputKey = new Text(String.valueOf(keyPoint.getX())+" "+String.valueOf(keyPoint.getY()));
        // fetch the new root for the key point and output it
        Point newRoot = ufImpl.getRoot(keyPoint);
        Text outputValue = new Text(String.valueOf(newRoot.getX())+" "+String.valueOf(newRoot.getY()));

        context.write(outputKey, outputValue);
        System.out.println("----------------- display the table ----------------");
        ufImpl.displayMap();
        System.out.println("----------------- display the table ----------------");
        System.out.println("------- DEBUG: Reducer ends: size of UF table : "+ufImpl.getComponentTable().size());
      }
    }

    /**
     * Job driver: expects exactly two arguments — the input folder and the
     * output folder — configures the second-pass job and blocks until completion.
     * Exits with status 0 on success, 1 on failure.
     */
    public static void main(String[] args) throws Exception {
    	if(args == null || args.length != 2){
    		System.out.println("Please pass the input and the output folder names.");
    		return;
    	}

    	Configuration conf = new Configuration();
        Job job = new Job(conf, SECOND_PASS);
        job.setJarByClass(MapReduceSecondPass.class);
        job.setMapperClass(Map.class);
        job.setReducerClass(Reduce.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}