/**
 * 
 */
package mapreduce4j;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URISyntaxException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Mapper.Context;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.log4j.Logger;


/**
 * The actual Map Reduce implementation
 * This is a very simple implementation that launches multiple threads for the Map
 * and then performs a multithreaded sort of the Map output before firing at a single
 * Reducer at the moment.  In the future this will support multiple reduce operations in 
 * parallel.
 * 
 * @author tim
 */
public class JobExecutor implements Runnable {
	protected Logger logger = Logger.getLogger(JobExecutor.class.getName());
	// running count of jobs handed to execute(); incremented without synchronization,
	// so assumes a single submitting thread — NOTE(review): confirm callers
	protected int submissionCount = 0;
	// the job currently being executed by run()
	protected Job job;
	
	// TODO CONFIGURIFY all this ja 
	// 1 meg chunks
	protected static long chunkSize=1024*1024;
	// 100k chunk
	// protected static long chunkSize=1024*100;
	protected static char lineTerminator = '\n';  // uhmmm...
	protected static int numMapperWorkers=10;	
	
	/**
	 * Submits a job for execution, either synchronously in the calling thread
	 * or asynchronously in a freshly spawned thread.
	 * 
	 * @param job to submit
	 * @param inCurrentThread true if to run in this thread, false if to spawn and return immediately
	 */
	public void execute(Job job, boolean inCurrentThread) {
		logger.info("Executing job[" + job.getName() + "] in current thread[" + inCurrentThread + 
				"].  Total submission count[" + ++submissionCount+ "]");
		this.job = job;
		if (inCurrentThread) { 
			run();
		} else {
			Thread t = new Thread(this);
			t.start();
		}
	}

	/**
	 * The real Map Reduce functionality.
	 * Splits each input file into ~chunkSize byte splits (extended to the next record
	 * boundary by the job's InputFormat), runs one Mapper per split on a fixed thread
	 * pool, waits for all maps, then feeds the sorted map output files to a single
	 * Reducer.  Currently this will launch a single reduce function only.
	 */
	@SuppressWarnings("unchecked")
	public void run() {
		
		try {
			// Java OS temporary directory is different on each OS, but why shouldn't we use it
			logger.debug("Using the System temporary directory: " + System.getProperty("java.io.tmpdir"));
			File tempDirectory = new File(System.getProperty("java.io.tmpdir"));
			
			// this is the pool of workers that we will block on awaiting them to finish before 
			// launching the reducers
			ThreadPoolExecutor mapExecutorService = (ThreadPoolExecutor)Executors.newFixedThreadPool(numMapperWorkers);
			CompletionService mapCompletionService = new ExecutorCompletionService(mapExecutorService);
			
			// the running count of files created - used in the mapper output file names also
			int mapperCount = 0;
			
			// split up the input files and submit them to the mapper pool
			for (File input : job.getInputFiles()) {
				logger.info("Reading input file[" + input.getAbsolutePath()+ "]");
				
				// try-with-resources: closing the stream also closes its channel, and both
				// are now released even if split calculation or mapper creation throws
				// (the original leaked them on any exception before the explicit close)
				try (FileInputStream fis = new FileInputStream(input);
						FileChannel fcin = fis.getChannel()) {
					
					// now we split up the input and send it to mappers
					// we do this by seeking forward by the split size and then using the
					// configured InputFormat to read until the end of the next record.
					// (This differs from Hadoop of course, since Hadoop has already divided the
					// data into chunks as it went in to HDFS).  
					// Once we have calculated the split, we create a new Mapper, assign it the split
					// and submit it to the queue.
					InputFormat inputFormat = job.getInputFormat().newInstance();
					
					long start = 0;
					long end = chunkSize;
					
					while (start < fcin.size()) {
						// work out the bytes for this chunk: extend past 'end' to the next record boundary
						long offset = inputFormat.bytesUntilRecord(fcin, end);
						// start a map from start - (end+offset)
						logger.info("Mapping from: " + start + " - " + (end+offset) + " (" + (end+offset-start) + " bytes)");
						Mapper<? extends Writable, ? extends Writable, ? extends Writable, ? extends Writable> mapper = job.getMapperClass().newInstance();
						ByteBuffer bb = ByteBuffer.allocate((int)(end+offset-start));
						fcin.read(bb, start);
						// As Mary Poppins tells us: "Start at the very beginning... it's a very good place to start..." etc etc
						bb.rewind();
						
						// create the output writer in the Java tmp directory
						//RecordWriter writer = new FileRecordWriter(tempDirectory + "/part-" + mapperCount++);
						RecordWriter writer = new SortingFileRecordWriter(tempDirectory + "/part-" + mapperCount++);
						
						// create the mapper, submit it for execution in the queue
						Context mapContext = mapper.new Context(job.getConfiguration(), inputFormat.createRecordReader(bb), writer);
						mapCompletionService.submit(new CallableMapper(mapper, mapContext));
						
						start = end + offset;
						end = start + chunkSize;
					}
				}
			}
			
			// stop allowing submission
			mapExecutorService.shutdown();
			
			// wait for the maps to finish; awaitTermination blocks properly instead of
			// the previous busy sleep-poll loop, while we still log progress periodically
			while (!mapExecutorService.awaitTermination(250, TimeUnit.MILLISECONDS)) {
				long remaining = mapExecutorService.getTaskCount() - mapExecutorService.getCompletedTaskCount();
				logger.info("Waiting for [" + remaining + "] maps to finish");
			}
			
			logger.info("Map stage finished");
			
			// perform the shuffle and sort
			// well... since we now sort on Map output with the SortingFileRecordWriter... 
			// we don't do a shuffle and sort, but simply pass to a single reducer
			
			// since only working single reducer, no need to create a submitable one yet
			Reducer reducer = job.getReducerClass().newInstance();
			
			// the following is BS for now...
			List<RecordReader> readers = new LinkedList<RecordReader>();
			for (int i=0; i<mapperCount; i++) {
				readers.add(initialiseReader(new File(tempDirectory + "/part-" + i)));
			}
			KeyValuesIterator kvi = new KeyValuesIterator(readers);
			
			// start the reduce
			logger.info("Starting the Reduce stage");
			reducer.run(reducer.new Context(kvi));
			
			
		} catch (InterruptedException e) {
			// restore the interrupt flag so owners of this thread can observe it
			Thread.currentThread().interrupt();
			logger.error("Interrupted while running job", e);
		} catch (Exception e) {
			logger.error(e.getMessage(), e);
		}
	}
	
	// BS for now
	/**
	 * Builds a TextIntRecordReader over the full contents of one map-output file
	 * (the file is read entirely into a heap ByteBuffer).
	 * 
	 * @param input the map output part file to read
	 * @return an initialised reader positioned at the start of the data
	 */
	private TextIntRecordReader initialiseReader(File input) throws URISyntaxException, FileNotFoundException, IOException {
		TextIntRecordReader r = new TextIntRecordReader();
		// try-with-resources fixes the original leak: the stream/channel were never closed
		try (FileInputStream fis = new FileInputStream(input);
				FileChannel fcin = fis.getChannel()) {
			ByteBuffer buffer = ByteBuffer.allocate((int)fcin.size());
			fcin.read(buffer);
			buffer.rewind();
			r.initialize(buffer);
		}
		return r;
	}
}